xref: /freebsd/sys/dev/ixl/if_ixl.c (revision c65f571c898b67f8c455bcf5447f8c1397880c69)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	1
52 #define IXL_DRIVER_VERSION_BUILD	0
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
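/* With the values above, this expands to the version string "2.1.0-k". */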
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *  Used by probe to select the devices to attach to
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
67 static pci_vendor_info_t ixl_vendor_info_array[] =
68 {
69 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 	/* required last entry */
86 	PVID_END
87 };
88 
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 /*** IFLIB interface ***/
93 static void	*ixl_register(device_t dev);
94 static int	 ixl_if_attach_pre(if_ctx_t ctx);
95 static int	 ixl_if_attach_post(if_ctx_t ctx);
96 static int	 ixl_if_detach(if_ctx_t ctx);
97 static int	 ixl_if_shutdown(if_ctx_t ctx);
98 static int	 ixl_if_suspend(if_ctx_t ctx);
99 static int	 ixl_if_resume(if_ctx_t ctx);
100 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
101 static void	 ixl_if_enable_intr(if_ctx_t ctx);
102 static void	 ixl_if_disable_intr(if_ctx_t ctx);
103 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
104 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
105 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
106 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
107 static void	 ixl_if_queues_free(if_ctx_t ctx);
108 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
109 static void	 ixl_if_multi_set(if_ctx_t ctx);
110 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
111 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
112 static int	 ixl_if_media_change(if_ctx_t ctx);
113 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
114 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
115 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
116 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
117 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
118 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
119 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
120 #ifdef PCI_IOV
121 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
122 #endif
123 
124 /*** Other ***/
125 static u_int	 ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
126 static void	 ixl_save_pf_tunables(struct ixl_pf *);
127 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
128 
129 /*********************************************************************
130  *  FreeBSD Device Interface Entry Points
131  *********************************************************************/
132 
133 static device_method_t ixl_methods[] = {
134 	/* Device interface */
135 	DEVMETHOD(device_register, ixl_register),
136 	DEVMETHOD(device_probe, iflib_device_probe),
137 	DEVMETHOD(device_attach, iflib_device_attach),
138 	DEVMETHOD(device_detach, iflib_device_detach),
139 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
140 #ifdef PCI_IOV
141 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
142 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
143 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
144 #endif
145 	DEVMETHOD_END
146 };
147 
148 static driver_t ixl_driver = {
149 	"ixl", ixl_methods, sizeof(struct ixl_pf),
150 };
151 
152 devclass_t ixl_devclass;
153 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
154 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
155 MODULE_VERSION(ixl, 3);
156 
157 MODULE_DEPEND(ixl, pci, 1, 1, 1);
158 MODULE_DEPEND(ixl, ether, 1, 1, 1);
159 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
160 
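/*
 * iflib interface-driver methods: iflib invokes these ifdi_* callbacks
 * through the ixl_if_driver registered via the shared context below.
 */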
161 static device_method_t ixl_if_methods[] = {
162 	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
163 	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
164 	DEVMETHOD(ifdi_detach, ixl_if_detach),
165 	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
166 	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
167 	DEVMETHOD(ifdi_resume, ixl_if_resume),
168 	DEVMETHOD(ifdi_init, ixl_if_init),
169 	DEVMETHOD(ifdi_stop, ixl_if_stop),
170 	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
171 	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
172 	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
173 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
174 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
175 	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
176 	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
177 	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
178 	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
179 	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
180 	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
181 	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
182 	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
183 	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
184 	DEVMETHOD(ifdi_timer, ixl_if_timer),
185 	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
186 	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
187 	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
188 	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
189 	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
190 #ifdef PCI_IOV
191 	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
192 	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
193 	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
194 	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
195 #endif
196 	// ifdi_led_func
197 	// ifdi_debug
198 	DEVMETHOD_END
199 };
200 
201 static driver_t ixl_if_driver = {
202 	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
203 };
204 
205 /*
206 ** TUNEABLE PARAMETERS:
207 */
208 
209 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
210     "ixl driver parameters");
211 
212 /*
213  * Leave this on unless you need to send flow control
214  * frames (or other control frames) from software
215  */
216 static int ixl_enable_tx_fc_filter = 1;
217 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
218     &ixl_enable_tx_fc_filter);
219 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
220     &ixl_enable_tx_fc_filter, 0,
221     "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
222 
223 static int ixl_i2c_access_method = 0;
224 TUNABLE_INT("hw.ixl.i2c_access_method",
225     &ixl_i2c_access_method);
226 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
227     &ixl_i2c_access_method, 0,
228     IXL_SYSCTL_HELP_I2C_METHOD);
229 
230 static int ixl_enable_vf_loopback = 1;
231 TUNABLE_INT("hw.ixl.enable_vf_loopback",
232     &ixl_enable_vf_loopback);
233 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
234     &ixl_enable_vf_loopback, 0,
235     IXL_SYSCTL_HELP_VF_LOOPBACK);
236 
237 /*
238  * Selects the method used to detect TX descriptor completion:
239  * head writeback or scanning the descriptors' write-back status.
240  */
241 static int ixl_enable_head_writeback = 1;
242 TUNABLE_INT("hw.ixl.enable_head_writeback",
243     &ixl_enable_head_writeback);
244 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
245     &ixl_enable_head_writeback, 0,
246     "Detect the last completed TX descriptor via the head value written back by HW instead of checking descriptors");
247 
248 static int ixl_core_debug_mask = 0;
249 TUNABLE_INT("hw.ixl.core_debug_mask",
250     &ixl_core_debug_mask);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
252     &ixl_core_debug_mask, 0,
253     "Display debug statements that are printed in non-shared code");
254 
255 static int ixl_shared_debug_mask = 0;
256 TUNABLE_INT("hw.ixl.shared_debug_mask",
257     &ixl_shared_debug_mask);
258 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
259     &ixl_shared_debug_mask, 0,
260     "Display debug statements that are printed in shared code");
261 
262 #if 0
263 /*
264 ** Controls for Interrupt Throttling
265 **	- true/false for dynamic adjustment
266 ** 	- default values for static ITR
267 */
268 static int ixl_dynamic_rx_itr = 0;
269 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
270 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
271     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
272 
273 static int ixl_dynamic_tx_itr = 0;
274 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
275 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
276     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
277 #endif
278 
279 static int ixl_rx_itr = IXL_ITR_8K;
280 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
281 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
282     &ixl_rx_itr, 0, "RX Interrupt Rate");
283 
284 static int ixl_tx_itr = IXL_ITR_4K;
285 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
286 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
287     &ixl_tx_itr, 0, "TX Interrupt Rate");
288 
289 #ifdef IXL_IW
290 int ixl_enable_iwarp = 0;
291 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
292 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
293     &ixl_enable_iwarp, 0, "iWARP enabled");
294 
295 #if __FreeBSD_version < 1100000
296 int ixl_limit_iwarp_msix = 1;
297 #else
298 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
299 #endif
300 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
301 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
302     &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
303 #endif
304 
305 extern struct if_txrx ixl_txrx_hwb;
306 extern struct if_txrx ixl_txrx_dwb;
307 
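/*
 * Device-independent parameters handed to iflib before attach: DMA and
 * TSO sizing limits, ring size bounds, and a layout of one TX and one
 * RX ring per qset with a single free list.
 */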
308 static struct if_shared_ctx ixl_sctx_init = {
309 	.isc_magic = IFLIB_MAGIC,
310 	.isc_q_align = PAGE_SIZE,
311 	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
312 	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
313 	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
314 	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
315 	.isc_rx_maxsize = 16384,
316 	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
317 	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
318 	.isc_nfl = 1,
319 	.isc_ntxqs = 1,
320 	.isc_nrxqs = 1,
321 
322 	.isc_admin_intrcnt = 1,
323 	.isc_vendor_info = ixl_vendor_info_array,
324 	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
325 	.isc_driver = &ixl_if_driver,
326 	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
327 
328 	.isc_nrxd_min = {IXL_MIN_RING},
329 	.isc_ntxd_min = {IXL_MIN_RING},
330 	.isc_nrxd_max = {IXL_MAX_RING},
331 	.isc_ntxd_max = {IXL_MAX_RING},
332 	.isc_nrxd_default = {IXL_DEFAULT_RING},
333 	.isc_ntxd_default = {IXL_DEFAULT_RING},
334 };
335 
336 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
337 
338 /*** Functions ***/
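/*
 * Called via DEVMETHOD(device_register) so iflib can fetch the shared
 * context before any per-instance state exists.
 */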
339 static void *
340 ixl_register(device_t dev)
341 {
342 	return (ixl_sctx);
343 }
344 
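/*
 * Map BAR0 and record the PCI identity in the shared-code i40e_hw so
 * register accesses and admin queue commands can reach the device.
 */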
345 static int
346 ixl_allocate_pci_resources(struct ixl_pf *pf)
347 {
348 	device_t dev = iflib_get_dev(pf->vsi.ctx);
349 	struct i40e_hw *hw = &pf->hw;
350 	int             rid;
351 
352 	/* Map BAR0 */
353 	rid = PCIR_BAR(0);
354 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
355 	    &rid, RF_ACTIVE);
356 
357 	if (!(pf->pci_mem)) {
358 		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
359 		return (ENXIO);
360 	}
361 
362 	/* Save off the PCI information */
363 	hw->vendor_id = pci_get_vendor(dev);
364 	hw->device_id = pci_get_device(dev);
365 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
366 	hw->subsystem_vendor_id =
367 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
368 	hw->subsystem_device_id =
369 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
370 
371 	hw->bus.device = pci_get_slot(dev);
372 	hw->bus.func = pci_get_function(dev);
373 
374 	/* Save off register access information */
375 	pf->osdep.mem_bus_space_tag =
376 		rman_get_bustag(pf->pci_mem);
377 	pf->osdep.mem_bus_space_handle =
378 		rman_get_bushandle(pf->pci_mem);
379 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
380 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
381 	pf->osdep.dev = dev;
382 
383 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
384 	pf->hw.back = &pf->osdep;
385 
386 	return (0);
387 }
388 
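/*
 * First stage of attach, run before iflib allocates queues: reset the
 * PF, bring up the admin queue, discover HW capabilities, set up the
 * LAN HMC, and fill in the iflib softc context (queue limits, ring
 * sizes, TX/RX ops, offload capabilities).
 */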
389 static int
390 ixl_if_attach_pre(if_ctx_t ctx)
391 {
392 	device_t dev;
393 	struct ixl_pf *pf;
394 	struct i40e_hw *hw;
395 	struct ixl_vsi *vsi;
396 	if_softc_ctx_t scctx;
397 	struct i40e_filter_control_settings filter;
398 	enum i40e_status_code status;
399 	int error = 0;
400 
401 	dev = iflib_get_dev(ctx);
402 	pf = iflib_get_softc(ctx);
403 
404 	INIT_DBG_DEV(dev, "begin");
405 
406 	vsi = &pf->vsi;
407 	vsi->back = pf;
408 	pf->dev = dev;
409 	hw = &pf->hw;
410 
411 	vsi->dev = dev;
412 	vsi->hw = &pf->hw;
413 	vsi->id = 0;
414 	vsi->num_vlans = 0;
415 	vsi->ctx = ctx;
416 	vsi->media = iflib_get_media(ctx);
417 	vsi->shared = scctx = iflib_get_softc_ctx(ctx);
418 
419 	/* Save tunable values */
420 	ixl_save_pf_tunables(pf);
421 
422 	/* Do PCI setup - map BAR0, etc */
423 	if (ixl_allocate_pci_resources(pf)) {
424 		device_printf(dev, "Allocation of PCI resources failed\n");
425 		error = ENXIO;
426 		goto err_pci_res;
427 	}
428 
429 	/* Establish a clean starting point */
430 	i40e_clear_hw(hw);
431 	status = i40e_pf_reset(hw);
432 	if (status) {
433 		device_printf(dev, "PF reset failure %s\n",
434 		    i40e_stat_str(hw, status));
435 		error = EIO;
436 		goto err_out;
437 	}
438 
439 	/* Initialize the shared code */
440 	status = i40e_init_shared_code(hw);
441 	if (status) {
442 		device_printf(dev, "Unable to initialize shared code, error %s\n",
443 		    i40e_stat_str(hw, status));
444 		error = EIO;
445 		goto err_out;
446 	}
447 
448 	/* Set up the admin queue */
449 	hw->aq.num_arq_entries = IXL_AQ_LEN;
450 	hw->aq.num_asq_entries = IXL_AQ_LEN;
451 	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
452 	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
453 
454 	status = i40e_init_adminq(hw);
455 	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
456 		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
457 		    i40e_stat_str(hw, status));
458 		error = EIO;
459 		goto err_out;
460 	}
461 	ixl_print_nvm_version(pf);
462 
463 	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
464 		device_printf(dev, "The driver for the device stopped "
465 		    "because the NVM image is newer than expected.\n");
466 		device_printf(dev, "You must install the most recent version of "
467 		    "the network driver.\n");
468 		error = EIO;
469 		goto err_out;
470 	}
471 
472 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
473 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
474 		device_printf(dev, "The driver for the device detected "
475 		    "a newer version of the NVM image than expected.\n");
476 		device_printf(dev, "Please install the most recent version "
477 		    "of the network driver.\n");
478 	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
479 		device_printf(dev, "The driver for the device detected "
480 		    "an older version of the NVM image than expected.\n");
481 		device_printf(dev, "Please update the NVM image.\n");
482 	}
483 
484 	/* Clear PXE mode */
485 	i40e_clear_pxe_mode(hw);
486 
487 	/* Get capabilities from the device */
488 	error = ixl_get_hw_capabilities(pf);
489 	if (error) {
490 		device_printf(dev, "get_hw_capabilities failed: %d\n",
491 		    error);
492 		goto err_get_cap;
493 	}
494 
495 	/* Set up host memory cache */
496 	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
497 	    hw->func_caps.num_rx_qp, 0, 0);
498 	if (status) {
499 		device_printf(dev, "init_lan_hmc failed: %s\n",
500 		    i40e_stat_str(hw, status));
		error = EIO;
501 		goto err_get_cap;
502 	}
503 	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
504 	if (status) {
505 		device_printf(dev, "configure_lan_hmc failed: %s\n",
506 		    i40e_stat_str(hw, status));
		error = EIO;
507 		goto err_mac_hmc;
508 	}
509 
510 	/* Disable LLDP from the firmware for certain NVM versions */
511 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
512 	    (pf->hw.aq.fw_maj_ver < 4)) {
513 		i40e_aq_stop_lldp(hw, TRUE, NULL);
514 		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
515 	}
516 
517 	/* Get MAC addresses from hardware */
518 	i40e_get_mac_addr(hw, hw->mac.addr);
519 	error = i40e_validate_mac_addr(hw->mac.addr);
520 	if (error) {
521 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
522 		goto err_mac_hmc;
523 	}
524 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
525 	iflib_set_mac(ctx, hw->mac.addr);
526 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
527 
528 	/* Set up the device filtering */
529 	bzero(&filter, sizeof(filter));
530 	filter.enable_ethtype = TRUE;
531 	filter.enable_macvlan = TRUE;
532 	filter.enable_fdir = FALSE;
533 	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
534 	if (i40e_set_filter_control(hw, &filter))
535 		device_printf(dev, "i40e_set_filter_control() failed\n");
536 
537 	/* Query device FW LLDP status */
538 	ixl_get_fw_lldp_status(pf);
539 	/* Tell FW to apply DCB config on link up */
540 	i40e_aq_set_dcb_parameters(hw, true, NULL);
541 
542 	/* Fill out iflib parameters */
543 	if (hw->mac.type == I40E_MAC_X722)
544 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
545 	else
546 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
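	/*
	 * Head-writeback mode reserves an extra u32 after the descriptor
	 * ring for hardware to write the last-completed descriptor index
	 * into; descriptor write-back mode sizes the ring for descriptors
	 * only.
	 */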
547 	if (vsi->enable_head_writeback) {
548 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
549 		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
550 		scctx->isc_txrx = &ixl_txrx_hwb;
551 	} else {
552 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
553 		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
554 		scctx->isc_txrx = &ixl_txrx_dwb;
555 	}
556 	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
557 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
558 	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
559 	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
560 	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
561 	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
562 	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
563 	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
564 	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
565 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
566 	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
567 
568 	INIT_DBG_DEV(dev, "end");
569 	return (0);
570 
571 err_mac_hmc:
572 	i40e_shutdown_lan_hmc(hw);
573 err_get_cap:
574 	i40e_shutdown_adminq(hw);
575 err_out:
576 	ixl_free_pci_resources(pf);
577 err_pci_res:
578 	return (error);
579 }
580 
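/*
 * Second stage of attach, run after iflib has sized the queues: create
 * the ifnet and media, read the initial link state, configure the
 * switch and filters, reserve PF queues, and set up AdminQ interrupts,
 * sysctls, and statistics.
 */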
581 static int
582 ixl_if_attach_post(if_ctx_t ctx)
583 {
584 	device_t dev;
585 	struct ixl_pf *pf;
586 	struct i40e_hw *hw;
587 	struct ixl_vsi *vsi;
588 	int error = 0;
589 	enum i40e_status_code status;
590 
591 	dev = iflib_get_dev(ctx);
592 	pf = iflib_get_softc(ctx);
593 
594 	INIT_DBG_DEV(dev, "begin");
595 
596 	vsi = &pf->vsi;
597 	vsi->ifp = iflib_get_ifp(ctx);
598 	hw = &pf->hw;
599 
600 	/* Save off determined number of queues for interface */
601 	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
602 	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
603 
604 	/* Setup OS network interface / ifnet */
605 	if (ixl_setup_interface(dev, pf)) {
606 		device_printf(dev, "interface setup failed!\n");
607 		error = EIO;
608 		goto err;
609 	}
610 
611 	/* Determine link state */
612 	if (ixl_attach_get_link_status(pf)) {
613 		error = EINVAL;
614 		goto err;
615 	}
616 
617 	error = ixl_switch_config(pf);
618 	if (error) {
619 		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
620 		     error);
621 		goto err;
622 	}
623 
624 	/* Add protocol filters to list */
625 	ixl_init_filters(vsi);
626 
627 	/* Init queue allocation manager */
628 	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
629 	if (error) {
630 		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
631 		    error);
632 		goto err;
633 	}
634 	/* reserve a contiguous allocation for the PF's VSI */
635 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
636 	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
637 	if (error) {
638 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
639 		    error);
640 		goto err;
641 	}
642 	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
643 	    pf->qtag.num_allocated, pf->qtag.num_active);
644 
645 	/* Limit PHY interrupts to link, autoneg, and modules failure */
646 	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
647 	    NULL);
648 	if (status) {
649 		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
650 		    " aq_err %s\n", i40e_stat_str(hw, status),
651 		    i40e_aq_str(hw, hw->aq.asq_last_status));
652 		goto err;
653 	}
654 
655 	/* Get the bus configuration and set the shared code */
656 	ixl_get_bus_info(pf);
657 
658 	/* Keep admin queue interrupts active while driver is loaded */
659 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
660 		ixl_configure_intr0_msix(pf);
661 		ixl_enable_intr0(hw);
662 	}
663 
664 	/* Set initial advertised speed sysctl value */
665 	ixl_set_initial_advertised_speeds(pf);
666 
667 	/* Initialize statistics & add sysctls */
668 	ixl_add_device_sysctls(pf);
669 	ixl_pf_reset_stats(pf);
670 	ixl_update_stats_counters(pf);
671 	ixl_add_hw_stats(pf);
672 
673 	hw->phy.get_link_info = true;
674 	i40e_get_link_status(hw, &pf->link_up);
675 	ixl_update_link_status(pf);
676 
677 #ifdef PCI_IOV
678 	ixl_initialize_sriov(pf);
679 #endif
680 
681 #ifdef IXL_IW
682 	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
683 		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
684 		if (pf->iw_enabled) {
685 			error = ixl_iw_pf_attach(pf);
686 			if (error) {
687 				device_printf(dev,
688 				    "interfacing to iWARP driver failed: %d\n",
689 				    error);
690 				goto err;
691 			} else
692 				device_printf(dev, "iWARP ready\n");
693 		} else
694 			device_printf(dev, "iWARP disabled on this device "
695 			    "(no MSI-X vectors)\n");
696 	} else {
697 		pf->iw_enabled = false;
698 		device_printf(dev, "The device is not iWARP enabled\n");
699 	}
700 #endif
701 
702 	INIT_DBG_DEV(dev, "end");
703 	return (0);
704 
705 err:
706 	INIT_DEBUGOUT("end: error %d", error);
707 	/* ixl_if_detach() is called on error from this */
708 	return (error);
709 }
710 
711 /**
712  * XXX: iflib always ignores the return value of detach(),
713  * so this function is not allowed to fail.
714  */
715 static int
716 ixl_if_detach(if_ctx_t ctx)
717 {
718 	struct ixl_pf *pf = iflib_get_softc(ctx);
719 	struct ixl_vsi *vsi = &pf->vsi;
720 	struct i40e_hw *hw = &pf->hw;
721 	device_t dev = pf->dev;
722 	enum i40e_status_code	status;
723 #ifdef IXL_IW
724 	int			error;
725 #endif
726 
727 	INIT_DBG_DEV(dev, "begin");
728 
729 #ifdef IXL_IW
730 	if (ixl_enable_iwarp && pf->iw_enabled) {
731 		error = ixl_iw_pf_detach(pf);
732 		if (error == EBUSY) {
733 			device_printf(dev, "iwarp in use; stop it first.\n");
734 			//return (error);
735 		}
736 	}
737 #endif
738 	/* Remove all previously allocated media types */
739 	ifmedia_removeall(vsi->media);
740 
741 	/* Shutdown LAN HMC */
742 	if (hw->hmc.hmc_obj) {
743 		status = i40e_shutdown_lan_hmc(hw);
744 		if (status)
745 			device_printf(dev,
746 			    "i40e_shutdown_lan_hmc() failed with status %s\n",
747 			    i40e_stat_str(hw, status));
748 	}
749 
750 	/* Shutdown admin queue */
751 	ixl_disable_intr0(hw);
752 	status = i40e_shutdown_adminq(hw);
753 	if (status)
754 		device_printf(dev,
755 		    "i40e_shutdown_adminq() failed with status %s\n",
756 		    i40e_stat_str(hw, status));
757 
758 	ixl_pf_qmgr_destroy(&pf->qmgr);
759 	ixl_free_pci_resources(pf);
760 	ixl_free_mac_filters(vsi);
761 	INIT_DBG_DEV(dev, "end");
762 	return (0);
763 }
764 
765 static int
766 ixl_if_shutdown(if_ctx_t ctx)
767 {
768 	int error = 0;
769 
770 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
771 
772 	/* TODO: Call ixl_if_stop()? */
773 
774 	/* TODO: Then setup low power mode */
775 
776 	return (error);
777 }
778 
779 static int
780 ixl_if_suspend(if_ctx_t ctx)
781 {
782 	int error = 0;
783 
784 	INIT_DEBUGOUT("ixl_if_suspend: begin");
785 
786 	/* TODO: Call ixl_if_stop()? */
787 
788 	/* TODO: Then setup low power mode */
789 
790 	return (error);
791 }
792 
793 static int
794 ixl_if_resume(if_ctx_t ctx)
795 {
796 	struct ifnet *ifp = iflib_get_ifp(ctx);
797 
798 	INIT_DEBUGOUT("ixl_if_resume: begin");
799 
800 	/* Read & clear wake-up registers */
801 
802 	/* Required after D3->D0 transition */
803 	if (ifp->if_flags & IFF_UP)
804 		ixl_if_init(ctx);
805 
806 	return (0);
807 }
808 
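/*
 * ifdi_init: bring the interface up. Rebuilds HW state if the admin
 * queue died, refreshes the MAC address and filters, programs RSS and
 * interrupts, and enables the rings.
 */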
809 void
810 ixl_if_init(if_ctx_t ctx)
811 {
812 	struct ixl_pf *pf = iflib_get_softc(ctx);
813 	struct ixl_vsi *vsi = &pf->vsi;
814 	struct i40e_hw	*hw = &pf->hw;
815 	struct ifnet *ifp = iflib_get_ifp(ctx);
816 	device_t 	dev = iflib_get_dev(ctx);
817 	u8		tmpaddr[ETHER_ADDR_LEN];
818 	int		ret;
819 
820 	/*
821 	 * If the admin queue is dead here, something outside the driver
822 	 * (e.g. a PF reset) probably reset the adapter.
823 	 * Rebuild the driver's state here if that occurs.
824 	 */
825 	if (!i40e_check_asq_alive(&pf->hw)) {
826 		device_printf(dev, "Admin Queue is down; resetting...\n");
827 		ixl_teardown_hw_structs(pf);
828 		ixl_rebuild_hw_structs_after_reset(pf);
829 	}
830 
831 	/* Get the latest mac address... User might use a LAA */
832 	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
833 	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
834 	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
835 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
836 		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
837 		ret = i40e_aq_mac_address_write(hw,
838 		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
839 		    hw->mac.addr, NULL);
840 		if (ret) {
841 			device_printf(dev, "LLA address change failed!!\n");
842 			return;
843 		}
844 		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
845 	}
846 
847 	iflib_set_mac(ctx, hw->mac.addr);
848 
849 	/* Prepare the VSI: rings, hmc contexts, etc... */
850 	if (ixl_initialize_vsi(vsi)) {
851 		device_printf(dev, "initialize vsi failed!!\n");
852 		return;
853 	}
854 
855 	/* Reconfigure multicast filters in HW */
856 	ixl_if_multi_set(ctx);
857 
858 	/* Set up RSS */
859 	ixl_config_rss(pf);
860 
861 	/* Set up MSI-X routing and the ITR settings */
862 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
863 		ixl_configure_queue_intr_msix(pf);
864 		ixl_configure_itr(pf);
865 	} else
866 		ixl_configure_legacy(pf);
867 
868 	if (vsi->enable_head_writeback)
869 		ixl_init_tx_cidx(vsi);
870 	else
871 		ixl_init_tx_rsqs(vsi);
872 
873 	ixl_enable_rings(vsi);
874 
875 	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
876 
877 	/* Re-add configure filters to HW */
878 	ixl_reconfigure_filters(vsi);
879 
880 	/* Configure promiscuous mode */
881 	ixl_if_promisc_set(ctx, if_getflags(ifp));
882 
883 #ifdef IXL_IW
884 	if (ixl_enable_iwarp && pf->iw_enabled) {
885 		ret = ixl_iw_pf_init(pf);
886 		if (ret)
887 			device_printf(dev,
888 			    "initialize iwarp failed, code %d\n", ret);
889 	}
890 #endif
891 }
892 
893 void
894 ixl_if_stop(if_ctx_t ctx)
895 {
896 	struct ixl_pf *pf = iflib_get_softc(ctx);
897 	struct ixl_vsi *vsi = &pf->vsi;
898 
899 	INIT_DEBUGOUT("ixl_if_stop: begin\n");
900 
901 	// TODO: This may need to be reworked
902 #ifdef IXL_IW
903 	/* Stop iWARP device */
904 	if (ixl_enable_iwarp && pf->iw_enabled)
905 		ixl_iw_pf_stop(pf);
906 #endif
907 
908 	ixl_disable_rings_intr(vsi);
909 	ixl_disable_rings(pf, vsi, &pf->qtag);
910 }
911 
912 static int
913 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
914 {
915 	struct ixl_pf *pf = iflib_get_softc(ctx);
916 	struct ixl_vsi *vsi = &pf->vsi;
917 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
918 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
919 	int err, i, rid, vector = 0;
920 	char buf[16];
921 
922 	MPASS(vsi->shared->isc_nrxqsets > 0);
923 	MPASS(vsi->shared->isc_ntxqsets > 0);
924 
925 	/* Admin Queue must use vector 0 */
926 	rid = vector + 1;
927 	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
928 	    ixl_msix_adminq, pf, 0, "aq");
929 	if (err) {
930 		iflib_irq_free(ctx, &vsi->irq);
931 		device_printf(iflib_get_dev(ctx),
932 		    "Failed to register Admin Queue handler\n");
933 		return (err);
934 	}
935 	/* Create soft IRQ for handling VFLRs */
936 	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
937 
938 	/* Now set up the stations */
939 	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
940 		rid = vector + 1;
941 
942 		snprintf(buf, sizeof(buf), "rxq%d", i);
943 		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
944 		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
945 		/* XXX: Does the driver work as expected if there are fewer
946 		 * RX queues than what the iflib context expects? */
947 		if (err) {
948 			device_printf(iflib_get_dev(ctx),
949 			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
950 			vsi->num_rx_queues = i + 1;
951 			goto fail;
952 		}
953 		rx_que->msix = vector;
954 	}
955 
956 	bzero(buf, sizeof(buf));
957 
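	/*
	 * TX queues get no MSI-X vectors of their own; each is serviced as
	 * a softirq on the IRQ of the RX queue it maps to round-robin
	 * below (i % isc_nrxqsets).
	 */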
958 	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
959 		snprintf(buf, sizeof(buf), "txq%d", i);
960 		iflib_softirq_alloc_generic(ctx,
961 		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
962 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
963 
964 		/* TODO: Maybe call a strategy function here to figure out which
965 		 * interrupts to map TX queues to. I don't know of a better way
966 		 * than this, short of a user-supplied map, though. */
967 		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
968 	}
969 
970 	return (0);
971 fail:
972 	iflib_irq_free(ctx, &vsi->irq);
973 	rx_que = vsi->rx_queues;
974 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
975 		iflib_irq_free(ctx, &rx_que->que_irq);
976 	return (err);
977 }
978 
979 /*
980  * Enable all interrupts
981  *
982  * Called in:
983  * iflib_init_locked, after ixl_if_init()
984  */
985 static void
986 ixl_if_enable_intr(if_ctx_t ctx)
987 {
988 	struct ixl_pf *pf = iflib_get_softc(ctx);
989 	struct ixl_vsi *vsi = &pf->vsi;
990 	struct i40e_hw		*hw = vsi->hw;
991 	struct ixl_rx_queue	*que = vsi->rx_queues;
992 
993 	ixl_enable_intr0(hw);
994 	/* Enable queue interrupts */
995 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
996 		/* TODO: Queue index parameter is probably wrong */
997 		ixl_enable_queue(hw, que->rxr.me);
998 }
999 
1000 /*
1001  * Disable queue interrupts
1002  *
1003  * Other interrupt causes need to remain active.
1004  */
1005 static void
1006 ixl_if_disable_intr(if_ctx_t ctx)
1007 {
1008 	struct ixl_pf *pf = iflib_get_softc(ctx);
1009 	struct ixl_vsi *vsi = &pf->vsi;
1010 	struct i40e_hw		*hw = vsi->hw;
1011 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1012 
1013 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1014 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1015 			ixl_disable_queue(hw, rx_que->msix - 1);
1016 	} else {
1017 		/* Setting PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF (end of list)
1018 		 * stops queues from triggering interrupts. */
1019 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1020 	}
1021 }
1022 
1023 static int
1024 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1025 {
1026 	struct ixl_pf *pf = iflib_get_softc(ctx);
1027 	struct ixl_vsi *vsi = &pf->vsi;
1028 	struct i40e_hw		*hw = vsi->hw;
1029 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1030 
1031 	ixl_enable_queue(hw, rx_que->msix - 1);
1032 	return (0);
1033 }
1034 
1035 static int
1036 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1037 {
1038 	struct ixl_pf *pf = iflib_get_softc(ctx);
1039 	struct ixl_vsi *vsi = &pf->vsi;
1040 	struct i40e_hw *hw = vsi->hw;
1041 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1042 
1043 	ixl_enable_queue(hw, tx_que->msix - 1);
1044 	return (0);
1045 }
1046 
1047 static int
1048 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1049 {
1050 	struct ixl_pf *pf = iflib_get_softc(ctx);
1051 	struct ixl_vsi *vsi = &pf->vsi;
1052 	if_softc_ctx_t scctx = vsi->shared;
1053 	struct ixl_tx_queue *que;
1054 	int i, j, error = 0;
1055 
1056 	MPASS(scctx->isc_ntxqsets > 0);
1057 	MPASS(ntxqs == 1);
1058 	MPASS(scctx->isc_ntxqsets == ntxqsets);
1059 
1060 	/* Allocate queue structure memory */
1061 	if (!(vsi->tx_queues =
1062 	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1063 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1064 		return (ENOMEM);
1065 	}
1066 
1067 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1068 		struct tx_ring *txr = &que->txr;
1069 
1070 		txr->me = i;
1071 		que->vsi = vsi;
1072 
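		/*
		 * The descriptor write-back path keeps a per-ring array of
		 * the descriptor indices that requested report status (RS)
		 * so completions can be checked in order; head writeback
		 * needs no such array.
		 */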
1073 		if (!vsi->enable_head_writeback) {
1074 			/* Allocate report status array */
1075 			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1076 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1077 				error = ENOMEM;
1078 				goto fail;
1079 			}
1080 			/* Init report status array */
1081 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1082 				txr->tx_rsq[j] = QIDX_INVALID;
1083 		}
1084 		/* get the virtual and physical address of the hardware queues */
1085 		txr->tail = I40E_QTX_TAIL(txr->me);
1086 		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1087 		txr->tx_paddr = paddrs[i * ntxqs];
1088 		txr->que = que;
1089 	}
1090 
1091 	return (0);
1092 fail:
1093 	ixl_if_queues_free(ctx);
1094 	return (error);
1095 }
1096 
1097 static int
1098 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1099 {
1100 	struct ixl_pf *pf = iflib_get_softc(ctx);
1101 	struct ixl_vsi *vsi = &pf->vsi;
1102 	struct ixl_rx_queue *que;
1103 	int i, error = 0;
1104 
1105 #ifdef INVARIANTS
1106 	if_softc_ctx_t scctx = vsi->shared;
1107 	MPASS(scctx->isc_nrxqsets > 0);
1108 	MPASS(nrxqs == 1);
1109 	MPASS(scctx->isc_nrxqsets == nrxqsets);
1110 #endif
1111 
1112 	/* Allocate queue structure memory */
1113 	if (!(vsi->rx_queues =
1114 	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1115 	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1116 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1117 		error = ENOMEM;
1118 		goto fail;
1119 	}
1120 
1121 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1122 		struct rx_ring *rxr = &que->rxr;
1123 
1124 		rxr->me = i;
1125 		que->vsi = vsi;
1126 
1127 		/* get the virtual and physical address of the hardware queues */
1128 		rxr->tail = I40E_QRX_TAIL(rxr->me);
1129 		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1130 		rxr->rx_paddr = paddrs[i * nrxqs];
1131 		rxr->que = que;
1132 	}
1133 
1134 	return (0);
1135 fail:
1136 	ixl_if_queues_free(ctx);
1137 	return (error);
1138 }
1139 
1140 static void
1141 ixl_if_queues_free(if_ctx_t ctx)
1142 {
1143 	struct ixl_pf *pf = iflib_get_softc(ctx);
1144 	struct ixl_vsi *vsi = &pf->vsi;
1145 
1146 	if (!vsi->enable_head_writeback) {
1147 		struct ixl_tx_queue *que;
1148 		int i = 0;
1149 
1150 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1151 			struct tx_ring *txr = &que->txr;
1152 			if (txr->tx_rsq != NULL) {
1153 				free(txr->tx_rsq, M_IXL);
1154 				txr->tx_rsq = NULL;
1155 			}
1156 		}
1157 	}
1158 
1159 	if (vsi->tx_queues != NULL) {
1160 		free(vsi->tx_queues, M_IXL);
1161 		vsi->tx_queues = NULL;
1162 	}
1163 	if (vsi->rx_queues != NULL) {
1164 		free(vsi->rx_queues, M_IXL);
1165 		vsi->rx_queues = NULL;
1166 	}
1167 }
1168 
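/*
 * Push the current link state into iflib and, under PCI_IOV, broadcast
 * it to the VFs. Called from attach and from the admin task whenever
 * the link changes.
 */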
1169 void
1170 ixl_update_link_status(struct ixl_pf *pf)
1171 {
1172 	struct ixl_vsi *vsi = &pf->vsi;
1173 	struct i40e_hw *hw = &pf->hw;
1174 	u64 baudrate;
1175 
1176 	if (pf->link_up) {
1177 		if (vsi->link_active == FALSE) {
1178 			vsi->link_active = TRUE;
1179 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1180 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1181 			ixl_link_up_msg(pf);
1182 #ifdef PCI_IOV
1183 			ixl_broadcast_link_state(pf);
1184 #endif
1185 
1186 		}
1187 	} else { /* Link down */
1188 		if (vsi->link_active == TRUE) {
1189 			vsi->link_active = FALSE;
1190 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1191 #ifdef PCI_IOV
1192 			ixl_broadcast_link_state(pf);
1193 #endif
1194 		}
1195 	}
1196 }
1197 
1198 static void
1199 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1200 {
1201 	device_t dev = pf->dev;
1202 	u32 rxq_idx, qtx_ctl;
1203 
1204 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1205 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1206 	qtx_ctl = e->desc.params.external.param1;
1207 
1208 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1209 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1210 }
1211 
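/*
 * Drain up to IXL_ADM_LIMIT events from the admin receive queue,
 * dispatching link, VF mailbox, and LAN-overflow events, then re-arm
 * the AdminQ interrupt cause. *pending returns the number of events
 * still queued.
 */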
1212 static int
1213 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1214 {
1215 	enum i40e_status_code status = I40E_SUCCESS;
1216 	struct i40e_arq_event_info event;
1217 	struct i40e_hw *hw = &pf->hw;
1218 	device_t dev = pf->dev;
1219 	u16 opcode;
1220 	u32 loop = 0, reg;
1221 
1222 	event.buf_len = IXL_AQ_BUF_SZ;
1223 	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1224 	if (!event.msg_buf) {
1225 		device_printf(dev, "%s: Unable to allocate memory for Admin"
1226 		    " Queue event!\n", __func__);
1227 		return (ENOMEM);
1228 	}
1229 
1230 	/* clean and process any events */
1231 	do {
1232 		status = i40e_clean_arq_element(hw, &event, pending);
1233 		if (status)
1234 			break;
1235 		opcode = LE16_TO_CPU(event.desc.opcode);
1236 		ixl_dbg(pf, IXL_DBG_AQ,
1237 		    "Admin Queue event: %#06x\n", opcode);
1238 		switch (opcode) {
1239 		case i40e_aqc_opc_get_link_status:
1240 			ixl_link_event(pf, &event);
1241 			break;
1242 		case i40e_aqc_opc_send_msg_to_pf:
1243 #ifdef PCI_IOV
1244 			ixl_handle_vf_msg(pf, &event);
1245 #endif
1246 			break;
1247 		/*
1248 		 * This should only occur on no-drop queues, which
1249 		 * aren't currently configured.
1250 		 */
1251 		case i40e_aqc_opc_event_lan_overflow:
1252 			ixl_handle_lan_overflow_event(pf, &event);
1253 			break;
1254 		default:
1255 			break;
1256 		}
1257 	} while (*pending && (loop++ < IXL_ADM_LIMIT));
1258 
1259 	free(event.msg_buf, M_IXL);
1260 
1261 	/* Re-enable admin queue interrupt cause */
1262 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1263 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1264 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1265 
1266 	return (status);
1267 }
1268 
1269 static void
1270 ixl_if_update_admin_status(if_ctx_t ctx)
1271 {
1272 	struct ixl_pf			*pf = iflib_get_softc(ctx);
1273 	struct i40e_hw			*hw = &pf->hw;
1274 	u16				pending;
1275 
1276 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1277 		ixl_handle_empr_reset(pf);
1278 
1279 	if (pf->state & IXL_PF_STATE_MDD_PENDING)
1280 		ixl_handle_mdd_event(pf);
1281 
1282 	ixl_process_adminq(pf, &pending);
1283 	ixl_update_link_status(pf);
1284 	ixl_update_stats_counters(pf);
1285 
1286 	/*
1287 	 * If there are still messages to process, reschedule ourselves.
1288 	 * Otherwise, re-enable our interrupt and go to sleep.
1289 	 */
1290 	if (pending > 0)
1291 		iflib_admin_intr_deferred(ctx);
1292 	else
1293 		ixl_enable_intr0(hw);
1294 }
1295 
1296 static void
1297 ixl_if_multi_set(if_ctx_t ctx)
1298 {
1299 	struct ixl_pf *pf = iflib_get_softc(ctx);
1300 	struct ixl_vsi *vsi = &pf->vsi;
1301 	struct i40e_hw *hw = vsi->hw;
1302 	int mcnt, flags;
1303 	int del_mcnt;
1304 
1305 	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1306 
1307 	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1308 	/* Delete filters for removed multicast addresses */
1309 	del_mcnt = ixl_del_multi(vsi);
1310 	vsi->num_macs -= del_mcnt;
1311 
1312 	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1313 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1314 		    vsi->seid, TRUE, NULL);
1315 		return;
1316 	}
1317 	/* (re-)install filters for all mcast addresses */
1318 	/* XXX: This bypasses filter count tracking code! */
1319 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
1320 	if (mcnt > 0) {
1321 		vsi->num_macs += mcnt;
1322 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1323 		ixl_add_hw_filters(vsi, flags, mcnt);
1324 	}
1325 
1326 	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
1327 	    __func__, vsi->num_macs);
1328 	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
1329 }
1330 
1331 static int
1332 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1333 {
1334 	struct ixl_pf *pf = iflib_get_softc(ctx);
1335 	struct ixl_vsi *vsi = &pf->vsi;
1336 
1337 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1338 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1339 		ETHER_VLAN_ENCAP_LEN)
1340 		return (EINVAL);
1341 
1342 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1343 		ETHER_VLAN_ENCAP_LEN;
1344 
1345 	return (0);
1346 }
1347 
1348 static void
1349 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1350 {
1351 	struct ixl_pf *pf = iflib_get_softc(ctx);
1352 	struct i40e_hw  *hw = &pf->hw;
1353 
1354 	INIT_DEBUGOUT("ixl_media_status: begin");
1355 
1356 	ifmr->ifm_status = IFM_AVALID;
1357 	ifmr->ifm_active = IFM_ETHER;
1358 
1359 	if (!pf->link_up) {
1360 		return;
1361 	}
1362 
1363 	ifmr->ifm_status |= IFM_ACTIVE;
1364 	/* Hardware is always full-duplex */
1365 	ifmr->ifm_active |= IFM_FDX;
1366 
1367 	switch (hw->phy.link_info.phy_type) {
1368 		/* 100 M */
1369 		case I40E_PHY_TYPE_100BASE_TX:
1370 			ifmr->ifm_active |= IFM_100_TX;
1371 			break;
1372 		/* 1 G */
1373 		case I40E_PHY_TYPE_1000BASE_T:
1374 			ifmr->ifm_active |= IFM_1000_T;
1375 			break;
1376 		case I40E_PHY_TYPE_1000BASE_SX:
1377 			ifmr->ifm_active |= IFM_1000_SX;
1378 			break;
1379 		case I40E_PHY_TYPE_1000BASE_LX:
1380 			ifmr->ifm_active |= IFM_1000_LX;
1381 			break;
1382 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1383 			ifmr->ifm_active |= IFM_1000_T;
1384 			break;
1385 		/* 10 G */
1386 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1387 			ifmr->ifm_active |= IFM_10G_TWINAX;
1388 			break;
1389 		case I40E_PHY_TYPE_10GBASE_SR:
1390 			ifmr->ifm_active |= IFM_10G_SR;
1391 			break;
1392 		case I40E_PHY_TYPE_10GBASE_LR:
1393 			ifmr->ifm_active |= IFM_10G_LR;
1394 			break;
1395 		case I40E_PHY_TYPE_10GBASE_T:
1396 			ifmr->ifm_active |= IFM_10G_T;
1397 			break;
1398 		case I40E_PHY_TYPE_XAUI:
1399 		case I40E_PHY_TYPE_XFI:
1400 			ifmr->ifm_active |= IFM_10G_TWINAX;
1401 			break;
1402 		case I40E_PHY_TYPE_10GBASE_AOC:
1403 			ifmr->ifm_active |= IFM_10G_AOC;
1404 			break;
1405 		/* 25 G */
1406 		case I40E_PHY_TYPE_25GBASE_KR:
1407 			ifmr->ifm_active |= IFM_25G_KR;
1408 			break;
1409 		case I40E_PHY_TYPE_25GBASE_CR:
1410 			ifmr->ifm_active |= IFM_25G_CR;
1411 			break;
1412 		case I40E_PHY_TYPE_25GBASE_SR:
1413 			ifmr->ifm_active |= IFM_25G_SR;
1414 			break;
1415 		case I40E_PHY_TYPE_25GBASE_LR:
1416 			ifmr->ifm_active |= IFM_25G_LR;
1417 			break;
1418 		case I40E_PHY_TYPE_25GBASE_AOC:
1419 			ifmr->ifm_active |= IFM_25G_AOC;
1420 			break;
1421 		case I40E_PHY_TYPE_25GBASE_ACC:
1422 			ifmr->ifm_active |= IFM_25G_ACC;
1423 			break;
1424 		/* 40 G */
1425 		case I40E_PHY_TYPE_40GBASE_CR4:
1426 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1427 			ifmr->ifm_active |= IFM_40G_CR4;
1428 			break;
1429 		case I40E_PHY_TYPE_40GBASE_SR4:
1430 			ifmr->ifm_active |= IFM_40G_SR4;
1431 			break;
1432 		case I40E_PHY_TYPE_40GBASE_LR4:
1433 			ifmr->ifm_active |= IFM_40G_LR4;
1434 			break;
1435 		case I40E_PHY_TYPE_XLAUI:
1436 			ifmr->ifm_active |= IFM_OTHER;
1437 			break;
1438 		case I40E_PHY_TYPE_1000BASE_KX:
1439 			ifmr->ifm_active |= IFM_1000_KX;
1440 			break;
1441 		case I40E_PHY_TYPE_SGMII:
1442 			ifmr->ifm_active |= IFM_1000_SGMII;
1443 			break;
1444 		/* ERJ: What's the difference between these? */
1445 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1446 		case I40E_PHY_TYPE_10GBASE_CR1:
1447 			ifmr->ifm_active |= IFM_10G_CR1;
1448 			break;
1449 		case I40E_PHY_TYPE_10GBASE_KX4:
1450 			ifmr->ifm_active |= IFM_10G_KX4;
1451 			break;
1452 		case I40E_PHY_TYPE_10GBASE_KR:
1453 			ifmr->ifm_active |= IFM_10G_KR;
1454 			break;
1455 		case I40E_PHY_TYPE_SFI:
1456 			ifmr->ifm_active |= IFM_10G_SFI;
1457 			break;
1458 		/* Our single 20G media type */
1459 		case I40E_PHY_TYPE_20GBASE_KR2:
1460 			ifmr->ifm_active |= IFM_20G_KR2;
1461 			break;
1462 		case I40E_PHY_TYPE_40GBASE_KR4:
1463 			ifmr->ifm_active |= IFM_40G_KR4;
1464 			break;
1465 		case I40E_PHY_TYPE_XLPPI:
1466 		case I40E_PHY_TYPE_40GBASE_AOC:
1467 			ifmr->ifm_active |= IFM_40G_XLPPI;
1468 			break;
1469 		/* Unknown to driver */
1470 		default:
1471 			ifmr->ifm_active |= IFM_UNKNOWN;
1472 			break;
1473 	}
1474 	/* Report flow control status as well */
1475 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1476 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1477 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1478 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1479 }
1480 
1481 static int
1482 ixl_if_media_change(if_ctx_t ctx)
1483 {
1484 	struct ifmedia *ifm = iflib_get_media(ctx);
1485 
1486 	INIT_DEBUGOUT("ixl_media_change: begin");
1487 
1488 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1489 		return (EINVAL);
1490 
1491 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1492 	return (ENODEV);
1493 }
1494 
1495 static int
1496 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1497 {
1498 	struct ixl_pf *pf = iflib_get_softc(ctx);
1499 	struct ixl_vsi *vsi = &pf->vsi;
1500 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1501 	struct i40e_hw	*hw = vsi->hw;
1502 	int		err;
1503 	bool		uni = FALSE, multi = FALSE;
1504 
1505 	if (flags & IFF_PROMISC)
1506 		uni = multi = TRUE;
1507 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1508 	    MAX_MULTICAST_ADDR)
1509 		multi = TRUE;
1510 
1511 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1512 	    vsi->seid, uni, NULL, true);
1513 	if (err)
1514 		return (err);
1515 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1516 	    vsi->seid, multi, NULL);
1517 	return (err);
1518 }
1519 
1520 static void
1521 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1522 {
1523 	if (qid != 0)
1524 		return;
1525 
1526 	/* Fire off the adminq task */
1527 	iflib_admin_intr_deferred(ctx);
1528 }
1529 
1530 static void
1531 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1532 {
1533 	struct ixl_pf *pf = iflib_get_softc(ctx);
1534 	struct ixl_vsi *vsi = &pf->vsi;
1535 	struct i40e_hw	*hw = vsi->hw;
1536 
1537 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1538 		return;
1539 
1540 	++vsi->num_vlans;
1541 	ixl_add_filter(vsi, hw->mac.addr, vtag);
1542 }
1543 
1544 static void
1545 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1546 {
1547 	struct ixl_pf *pf = iflib_get_softc(ctx);
1548 	struct ixl_vsi *vsi = &pf->vsi;
1549 	struct i40e_hw	*hw = vsi->hw;
1550 
1551 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1552 		return;
1553 
1554 	--vsi->num_vlans;
1555 	ixl_del_filter(vsi, hw->mac.addr, vtag);
1556 }
1557 
1558 static uint64_t
1559 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1560 {
1561 	struct ixl_pf *pf = iflib_get_softc(ctx);
1562 	struct ixl_vsi *vsi = &pf->vsi;
1563 	if_t ifp = iflib_get_ifp(ctx);
1564 
1565 	switch (cnt) {
1566 	case IFCOUNTER_IPACKETS:
1567 		return (vsi->ipackets);
1568 	case IFCOUNTER_IERRORS:
1569 		return (vsi->ierrors);
1570 	case IFCOUNTER_OPACKETS:
1571 		return (vsi->opackets);
1572 	case IFCOUNTER_OERRORS:
1573 		return (vsi->oerrors);
1574 	case IFCOUNTER_COLLISIONS:
1575 		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
1576 		return (0);
1577 	case IFCOUNTER_IBYTES:
1578 		return (vsi->ibytes);
1579 	case IFCOUNTER_OBYTES:
1580 		return (vsi->obytes);
1581 	case IFCOUNTER_IMCASTS:
1582 		return (vsi->imcasts);
1583 	case IFCOUNTER_OMCASTS:
1584 		return (vsi->omcasts);
1585 	case IFCOUNTER_IQDROPS:
1586 		return (vsi->iqdrops);
1587 	case IFCOUNTER_OQDROPS:
1588 		return (vsi->oqdrops);
1589 	case IFCOUNTER_NOPROTO:
1590 		return (vsi->noproto);
1591 	default:
1592 		return (if_get_counter_default(ifp, cnt));
1593 	}
1594 }
1595 
1596 #ifdef PCI_IOV
1597 static void
1598 ixl_if_vflr_handle(if_ctx_t ctx)
1599 {
1600 	struct ixl_pf *pf = iflib_get_softc(ctx);
1601 
1602 	ixl_handle_vflr(pf);
1603 }
1604 #endif
1605 
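/*
 * Services SIOCGI2C requests by reading module EEPROM bytes one at a
 * time through pf->read_i2c_byte, which is hooked up elsewhere based
 * on the hw.ixl.i2c_access_method tunable.
 */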
1606 static int
1607 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1608 {
1609 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1610 
1611 	if (pf->read_i2c_byte == NULL)
1612 		return (EINVAL);
1613 
1614 	for (int i = 0; i < req->len; i++)
1615 		if (pf->read_i2c_byte(pf, req->offset + i,
1616 		    req->dev_addr, &req->data[i]))
1617 			return (EIO);
1618 	return (0);
1619 }
1620 
1621 static int
1622 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1623 {
1624 	struct ixl_pf *pf = iflib_get_softc(ctx);
1625 	struct ifdrv *ifd = (struct ifdrv *)data;
1626 	int error = 0;
1627 
1628 	/*
1629 	 * iflib_if_ioctl() forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
1630 	 * performing privilege checks. It is important that this function
1631 	 * perform the necessary checks for commands which should only be
1632 	 * executed by privileged threads.
1633 	 */
1634 
1635 	switch(command) {
1636 	case SIOCGDRVSPEC:
1637 	case SIOCSDRVSPEC:
1638 		/* NVM update command */
1639 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1640 			error = priv_check(curthread, PRIV_DRIVER);
1641 			if (error)
1642 				break;
1643 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1644 		} else {
1645 			error = EINVAL;
1646 		}
1647 		break;
1648 	default:
1649 		error = EOPNOTSUPP;
1650 	}
1651 
1652 	return (error);
1653 }
1654 
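/*
 * if_foreach_llmaddr() callback: install one multicast MAC filter per
 * link-level address and return 1 so the caller's tally reflects the
 * number of addresses processed.
 */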
1655 static u_int
1656 ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
1657 {
1658 	struct ixl_vsi *vsi = arg;
1659 
1660 	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
1661 	return (1);
1662 }
1663 
1664 /*
1665  * Sanity check and save off tunable values.
1666  */
1667 static void
1668 ixl_save_pf_tunables(struct ixl_pf *pf)
1669 {
1670 	device_t dev = pf->dev;
1671 
1672 	/* Save tunable information */
1673 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1674 	pf->dbg_mask = ixl_core_debug_mask;
1675 	pf->hw.debug_mask = ixl_shared_debug_mask;
1676 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1677 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1678 #if 0
1679 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1680 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1681 #endif
1682 
1683 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1684 		pf->i2c_access_method = 0;
1685 	else
1686 		pf->i2c_access_method = ixl_i2c_access_method;
1687 
1688 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1689 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1690 		    ixl_tx_itr);
1691 		device_printf(dev, "tx_itr must be between %d and %d, "
1692 		    "inclusive\n",
1693 		    0, IXL_MAX_ITR);
1694 		device_printf(dev, "Using default value of %d instead\n",
1695 		    IXL_ITR_4K);
1696 		pf->tx_itr = IXL_ITR_4K;
1697 	} else
1698 		pf->tx_itr = ixl_tx_itr;
1699 
1700 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1701 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1702 		    ixl_rx_itr);
1703 		device_printf(dev, "rx_itr must be between %d and %d, "
1704 		    "inclusive\n",
1705 		    0, IXL_MAX_ITR);
1706 		device_printf(dev, "Using default value of %d instead\n",
1707 		    IXL_ITR_8K);
1708 		pf->rx_itr = IXL_ITR_8K;
1709 	} else
1710 		pf->rx_itr = ixl_rx_itr;
1711 }
1712 
1713