xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 342af4d5efec74bb4bc11261fdd9991c53616f54)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 #endif
40 
41 #include "ixl.h"
42 #include "ixl_pf.h"
43 
44 #ifdef RSS
45 #include <net/rss_config.h>
46 #endif
47 
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixl_driver_version[] = "1.4.3";
52 
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixl_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62 
63 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {
65 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
73 #ifdef X722_SUPPORT
74 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
75 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
76 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
77 #endif
78 	/* required last entry */
79 	{0, 0, 0, 0, 0}
80 };
81 
82 /*********************************************************************
83  *  Table of branding strings
84  *********************************************************************/
85 
86 static char    *ixl_strings[] = {
87 	"Intel(R) Ethernet Connection XL710 Driver"
88 };
89 
90 
91 /*********************************************************************
92  *  Function prototypes
93  *********************************************************************/
94 static int      ixl_probe(device_t);
95 static int      ixl_attach(device_t);
96 static int      ixl_detach(device_t);
97 static int      ixl_shutdown(device_t);
98 static int	ixl_get_hw_capabilities(struct ixl_pf *);
99 static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
100 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
101 static void	ixl_init(void *);
102 static void	ixl_init_locked(struct ixl_pf *);
103 static void     ixl_stop(struct ixl_pf *);
104 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
105 static int      ixl_media_change(struct ifnet *);
106 static void     ixl_update_link_status(struct ixl_pf *);
107 static int      ixl_allocate_pci_resources(struct ixl_pf *);
108 static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
109 static int	ixl_setup_stations(struct ixl_pf *);
110 static int	ixl_switch_config(struct ixl_pf *);
111 static int	ixl_initialize_vsi(struct ixl_vsi *);
112 static int	ixl_assign_vsi_msix(struct ixl_pf *);
113 static int	ixl_assign_vsi_legacy(struct ixl_pf *);
114 static int	ixl_init_msix(struct ixl_pf *);
115 static void	ixl_configure_msix(struct ixl_pf *);
116 static void	ixl_configure_itr(struct ixl_pf *);
117 static void	ixl_configure_legacy(struct ixl_pf *);
118 static void	ixl_init_taskqueues(struct ixl_pf *);
119 static void	ixl_free_taskqueues(struct ixl_pf *);
120 static void	ixl_free_pci_resources(struct ixl_pf *);
121 static void	ixl_local_timer(void *);
122 static int	ixl_setup_interface(device_t, struct ixl_vsi *);
123 static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
124 static void	ixl_config_rss(struct ixl_vsi *);
125 static void	ixl_set_queue_rx_itr(struct ixl_queue *);
126 static void	ixl_set_queue_tx_itr(struct ixl_queue *);
127 static int	ixl_set_advertised_speeds(struct ixl_pf *, int);
128 
129 static int	ixl_enable_rings(struct ixl_vsi *);
130 static int	ixl_disable_rings(struct ixl_vsi *);
131 static void	ixl_enable_intr(struct ixl_vsi *);
132 static void	ixl_disable_intr(struct ixl_vsi *);
133 static void	ixl_disable_rings_intr(struct ixl_vsi *);
134 
135 static void     ixl_enable_adminq(struct i40e_hw *);
136 static void     ixl_disable_adminq(struct i40e_hw *);
137 static void     ixl_enable_queue(struct i40e_hw *, int);
138 static void     ixl_disable_queue(struct i40e_hw *, int);
139 static void     ixl_enable_legacy(struct i40e_hw *);
140 static void     ixl_disable_legacy(struct i40e_hw *);
141 
142 static void     ixl_set_promisc(struct ixl_vsi *);
143 static void     ixl_add_multi(struct ixl_vsi *);
144 static void     ixl_del_multi(struct ixl_vsi *);
145 static void	ixl_register_vlan(void *, struct ifnet *, u16);
146 static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
147 static void	ixl_setup_vlan_filters(struct ixl_vsi *);
148 
149 static void	ixl_init_filters(struct ixl_vsi *);
150 static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
151 static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
152 static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
153 static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
154 static void	ixl_del_hw_filters(struct ixl_vsi *, int);
155 static struct ixl_mac_filter *
156 		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
157 static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
158 static void	ixl_free_mac_filters(struct ixl_vsi *vsi);
159 
160 
161 /* Sysctl debug interface */
162 #ifdef IXL_DEBUG_SYSCTL
163 static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
164 static void	ixl_print_debug_info(struct ixl_pf *);
165 #endif
166 
167 /* The MSI/X Interrupt handlers */
168 static void	ixl_intr(void *);
169 static void	ixl_msix_que(void *);
170 static void	ixl_msix_adminq(void *);
171 static void	ixl_handle_mdd_event(struct ixl_pf *);
172 
173 /* Deferred interrupt tasklets */
174 static void	ixl_do_adminq(void *, int);
175 
176 /* Sysctl handlers */
177 static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
178 static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
179 static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
180 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
181 
182 /* Statistics */
183 static void     ixl_add_hw_stats(struct ixl_pf *);
184 static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
185 		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
186 static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
187 		    struct sysctl_oid_list *,
188 		    struct i40e_eth_stats *);
189 static void	ixl_update_stats_counters(struct ixl_pf *);
190 static void	ixl_update_eth_stats(struct ixl_vsi *);
191 static void	ixl_update_vsi_stats(struct ixl_vsi *);
192 static void	ixl_pf_reset_stats(struct ixl_pf *);
193 static void	ixl_vsi_reset_stats(struct ixl_vsi *);
194 static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
195 		    u64 *, u64 *);
196 static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
197 		    u64 *, u64 *);
198 
199 #ifdef IXL_DEBUG_SYSCTL
200 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
201 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
202 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
203 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
204 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
205 #endif
206 
207 #ifdef PCI_IOV
208 static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
209 
210 static int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
211 static void	ixl_iov_uninit(device_t dev);
212 static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
213 
214 static void	ixl_handle_vf_msg(struct ixl_pf *,
215 		    struct i40e_arq_event_info *);
216 static void	ixl_handle_vflr(void *arg, int pending);
217 
218 static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
219 static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
220 #endif
221 
222 /*********************************************************************
223  *  FreeBSD Device Interface Entry Points
224  *********************************************************************/
225 
226 static device_method_t ixl_methods[] = {
227 	/* Device interface */
228 	DEVMETHOD(device_probe, ixl_probe),
229 	DEVMETHOD(device_attach, ixl_attach),
230 	DEVMETHOD(device_detach, ixl_detach),
231 	DEVMETHOD(device_shutdown, ixl_shutdown),
232 #ifdef PCI_IOV
233 	DEVMETHOD(pci_iov_init, ixl_iov_init),
234 	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
235 	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
236 #endif
237 	{0, 0}
238 };
239 
240 static driver_t ixl_driver = {
241 	"ixl", ixl_methods, sizeof(struct ixl_pf),
242 };
243 
244 devclass_t ixl_devclass;
245 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
246 
247 MODULE_DEPEND(ixl, pci, 1, 1, 1);
248 MODULE_DEPEND(ixl, ether, 1, 1, 1);
249 #ifdef DEV_NETMAP
250 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
251 #endif /* DEV_NETMAP */
252 
253 /*
254 ** Global reset mutex
255 */
256 static struct mtx ixl_reset_mtx;
257 
258 /*
259 ** TUNEABLE PARAMETERS:
260 */
261 
262 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
263                    "IXL driver parameters");
264 
265 /*
266  * MSIX should be the default for best performance,
267  * but this allows it to be forced off for testing.
268  */
269 static int ixl_enable_msix = 1;
270 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
271 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
272     "Enable MSI-X interrupts");
273 
274 /*
275 ** Number of descriptors per ring:
276 **   - TX and RX are the same size
277 */
278 static int ixl_ringsz = DEFAULT_RING;
279 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
280 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
281     &ixl_ringsz, 0, "Descriptor Ring Size");
282 
283 /*
284 ** This can be set manually, if left as 0 the
285 ** number of queues will be calculated based
286 ** on cpus and msix vectors available.
287 */
288 int ixl_max_queues = 0;
289 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
290 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
291     &ixl_max_queues, 0, "Number of Queues");
292 
293 /*
294 ** Controls for Interrupt Throttling
295 **	- true/false for dynamic adjustment
296 ** 	- default values for static ITR
297 */
298 int ixl_dynamic_rx_itr = 0;
299 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
300 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
301     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
302 
303 int ixl_dynamic_tx_itr = 0;
304 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
305 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
306     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
307 
308 int ixl_rx_itr = IXL_ITR_8K;
309 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
310 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
311     &ixl_rx_itr, 0, "RX Interrupt Rate");
312 
313 int ixl_tx_itr = IXL_ITR_4K;
314 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
315 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
316     &ixl_tx_itr, 0, "TX Interrupt Rate");
317 
318 #ifdef IXL_FDIR
319 static int ixl_enable_fdir = 1;
320 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
321 /* Rate at which we sample */
322 int ixl_atr_rate = 20;
323 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
324 #endif
325 
326 #ifdef DEV_NETMAP
327 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
328 #include <dev/netmap/if_ixl_netmap.h>
329 #endif /* DEV_NETMAP */
330 
331 static char *ixl_fc_string[6] = {
332 	"None",
333 	"Rx",
334 	"Tx",
335 	"Full",
336 	"Priority",
337 	"Default"
338 };
339 
340 static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
341 
342 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
343     {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
344 
345 /*********************************************************************
346  *  Device identification routine
347  *
348  *  ixl_probe determines if the driver should be loaded on
349  *  the hardware based on PCI vendor/device id of the device.
350  *
351  *  return BUS_PROBE_DEFAULT on success, positive on failure
352  *********************************************************************/
353 
354 static int
355 ixl_probe(device_t dev)
356 {
357 	ixl_vendor_info_t *ent;
358 
359 	u16	pci_vendor_id, pci_device_id;
360 	u16	pci_subvendor_id, pci_subdevice_id;
361 	char	device_name[256];
362 	static bool lock_init = FALSE;
363 
364 	INIT_DEBUGOUT("ixl_probe: begin");
365 
366 	pci_vendor_id = pci_get_vendor(dev);
367 	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
368 		return (ENXIO);
369 
370 	pci_device_id = pci_get_device(dev);
371 	pci_subvendor_id = pci_get_subvendor(dev);
372 	pci_subdevice_id = pci_get_subdevice(dev);
373 
374 	ent = ixl_vendor_info_array;
375 	while (ent->vendor_id != 0) {
376 		if ((pci_vendor_id == ent->vendor_id) &&
377 		    (pci_device_id == ent->device_id) &&
378 
379 		    ((pci_subvendor_id == ent->subvendor_id) ||
380 		     (ent->subvendor_id == 0)) &&
381 
382 		    ((pci_subdevice_id == ent->subdevice_id) ||
383 		     (ent->subdevice_id == 0))) {
384 			sprintf(device_name, "%s, Version - %s",
385 				ixl_strings[ent->index],
386 				ixl_driver_version);
387 			device_set_desc_copy(dev, device_name);
388 			/* One shot mutex init */
389 			if (lock_init == FALSE) {
390 				lock_init = TRUE;
391 				mtx_init(&ixl_reset_mtx,
392 				    "ixl_reset",
393 				    "IXL RESET Lock", MTX_DEF);
394 			}
395 			return (BUS_PROBE_DEFAULT);
396 		}
397 		ent++;
398 	}
399 	return (ENXIO);
400 }
401 
402 /*********************************************************************
403  *  Device initialization routine
404  *
405  *  The attach entry point is called when the driver is being loaded.
406  *  This routine identifies the type of hardware, allocates all resources
407  *  and initializes the hardware.
408  *
409  *  return 0 on success, positive on failure
410  *********************************************************************/
411 
412 static int
413 ixl_attach(device_t dev)
414 {
415 	struct ixl_pf	*pf;
416 	struct i40e_hw	*hw;
417 	struct ixl_vsi *vsi;
418 	u16		bus;
419 	int             error = 0;
420 #ifdef PCI_IOV
421 	nvlist_t	*pf_schema, *vf_schema;
422 	int		iov_error;
423 #endif
424 
425 	INIT_DEBUGOUT("ixl_attach: begin");
426 
427 	/* Allocate, clear, and link in our primary soft structure */
428 	pf = device_get_softc(dev);
429 	pf->dev = pf->osdep.dev = dev;
430 	hw = &pf->hw;
431 
432 	/*
433 	** Note this assumes we have a single embedded VSI,
434 	** this could be enhanced later to allocate multiple
435 	*/
436 	vsi = &pf->vsi;
437 	vsi->dev = pf->dev;
438 
439 	/* Core Lock Init*/
440 	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
441 
442 	/* Set up the timer callout */
443 	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
444 
445 	/* Set up sysctls */
446 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
449 	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
450 
451 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
454 	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
455 
456 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
459 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
460 
461 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
462 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
464 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
465 
466 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
467 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
468 	    OID_AUTO, "rx_itr", CTLFLAG_RW,
469 	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
470 
471 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
472 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
473 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
474 	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
475 
476 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
477 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
478 	    OID_AUTO, "tx_itr", CTLFLAG_RW,
479 	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
480 
481 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
482 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
483 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
484 	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
485 
486 #ifdef IXL_DEBUG_SYSCTL
487 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
490 	    ixl_debug_info, "I", "Debug Information");
491 
492 	/* Debug shared-code message level */
493 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
494 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 	    OID_AUTO, "debug_mask", CTLFLAG_RW,
496 	    &pf->hw.debug_mask, 0, "Debug Message Level");
497 
498 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
499 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
500 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
501 	    0, "PF/VF Virtual Channel debug level");
502 
503 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
504 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
505 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
506 	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
507 
508 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
509 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
510 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
511 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
512 
513 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
514 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
515 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
516 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
517 
518 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
519 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
520 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
521 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
522 
523 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
524 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
525 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
526 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
527 #endif
528 
529 	/* Save off the PCI information */
530 	hw->vendor_id = pci_get_vendor(dev);
531 	hw->device_id = pci_get_device(dev);
532 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
533 	hw->subsystem_vendor_id =
534 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
535 	hw->subsystem_device_id =
536 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
537 
538 	hw->bus.device = pci_get_slot(dev);
539 	hw->bus.func = pci_get_function(dev);
540 
541 	pf->vc_debug_lvl = 1;
542 
543 	/* Do PCI setup - map BAR0, etc */
544 	if (ixl_allocate_pci_resources(pf)) {
545 		device_printf(dev, "Allocation of PCI resources failed\n");
546 		error = ENXIO;
547 		goto err_out;
548 	}
549 
550 	/* Establish a clean starting point */
551 	i40e_clear_hw(hw);
552 	error = i40e_pf_reset(hw);
553 	if (error) {
554 		device_printf(dev,"PF reset failure %x\n", error);
555 		error = EIO;
556 		goto err_out;
557 	}
558 
559 	/* Set admin queue parameters */
560 	hw->aq.num_arq_entries = IXL_AQ_LEN;
561 	hw->aq.num_asq_entries = IXL_AQ_LEN;
562 	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
563 	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
564 
565 	/* Initialize the shared code */
566 	error = i40e_init_shared_code(hw);
567 	if (error) {
568 		device_printf(dev,"Unable to initialize the shared code\n");
569 		error = EIO;
570 		goto err_out;
571 	}
572 
573 	/* Set up the admin queue */
574 	error = i40e_init_adminq(hw);
575 	if (error) {
576 		device_printf(dev, "The driver for the device stopped "
577 		    "because the NVM image is newer than expected.\n"
578 		    "You must install the most recent version of "
579 		    " the network driver.\n");
580 		goto err_out;
581 	}
582 	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
583 
584         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
585 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
586 		device_printf(dev, "The driver for the device detected "
587 		    "a newer version of the NVM image than expected.\n"
588 		    "Please install the most recent version of the network driver.\n");
589 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
590 	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
591 		device_printf(dev, "The driver for the device detected "
592 		    "an older version of the NVM image than expected.\n"
593 		    "Please update the NVM image.\n");
594 
595 	/* Clear PXE mode */
596 	i40e_clear_pxe_mode(hw);
597 
598 	/* Get capabilities from the device */
599 	error = ixl_get_hw_capabilities(pf);
600 	if (error) {
601 		device_printf(dev, "HW capabilities failure!\n");
602 		goto err_get_cap;
603 	}
604 
605 	/* Set up host memory cache */
606 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
607 	    hw->func_caps.num_rx_qp, 0, 0);
608 	if (error) {
609 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
610 		goto err_get_cap;
611 	}
612 
613 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
614 	if (error) {
615 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
616 		goto err_mac_hmc;
617 	}
618 
619 	/* Disable LLDP from the firmware */
620 	i40e_aq_stop_lldp(hw, TRUE, NULL);
621 
622 	i40e_get_mac_addr(hw, hw->mac.addr);
623 	error = i40e_validate_mac_addr(hw->mac.addr);
624 	if (error) {
625 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
626 		goto err_mac_hmc;
627 	}
628 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
629 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
630 
631 	/* Set up VSI and queues */
632 	if (ixl_setup_stations(pf) != 0) {
633 		device_printf(dev, "setup stations failed!\n");
634 		error = ENOMEM;
635 		goto err_mac_hmc;
636 	}
637 
638 	/* Initialize mac filter list for VSI */
639 	SLIST_INIT(&vsi->ftl);
640 
641 	/* Set up interrupt routing here */
642 	if (pf->msix > 1)
643 		error = ixl_assign_vsi_msix(pf);
644 	else
645 		error = ixl_assign_vsi_legacy(pf);
646 	if (error)
647 		goto err_mac_hmc;
648 
649 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
650 	    (hw->aq.fw_maj_ver < 4)) {
651 		i40e_msec_delay(75);
652 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
653 		if (error)
654 			device_printf(dev, "link restart failed, aq_err=%d\n",
655 			    pf->hw.aq.asq_last_status);
656 	}
657 
658 	/* Determine link state */
659 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
660 	i40e_get_link_status(hw, &pf->link_up);
661 
662 	/* Setup OS specific network interface */
663 	if (ixl_setup_interface(dev, vsi) != 0) {
664 		device_printf(dev, "interface setup failed!\n");
665 		error = EIO;
666 		goto err_late;
667 	}
668 
669 	error = ixl_switch_config(pf);
670 	if (error) {
671 		device_printf(dev, "Initial switch config failed: %d\n", error);
672 		goto err_late;
673 	}
674 
675 	/* Limit phy interrupts to link and modules failure */
676 	error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN |
677 		I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
678 	if (error)
679 		device_printf(dev, "set phy mask failed: %d\n", error);
680 
681 	/* Get the bus configuration and set the shared code */
682 	bus = ixl_get_bus_info(hw, dev);
683 	i40e_set_pci_config_data(hw, bus);
684 
685 	/* Initialize taskqueues */
686 	ixl_init_taskqueues(pf);
687 
688 	/* Initialize statistics */
689 	ixl_pf_reset_stats(pf);
690 	ixl_update_stats_counters(pf);
691 	ixl_add_hw_stats(pf);
692 
693 	/* Register for VLAN events */
694 	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
695 	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
696 	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
697 	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
698 
699 #ifdef PCI_IOV
700 	/* SR-IOV is only supported when MSI-X is in use. */
701 	if (pf->msix > 1) {
702 		pf_schema = pci_iov_schema_alloc_node();
703 		vf_schema = pci_iov_schema_alloc_node();
704 		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
705 		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
706 		    IOV_SCHEMA_HASDEFAULT, TRUE);
707 		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
708 		    IOV_SCHEMA_HASDEFAULT, FALSE);
709 		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
710 		    IOV_SCHEMA_HASDEFAULT, FALSE);
711 
712 		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
713 		if (iov_error != 0)
714 			device_printf(dev,
715 			    "Failed to initialize SR-IOV (error=%d)\n",
716 			    iov_error);
717 	}
718 #endif
719 
720 #ifdef DEV_NETMAP
721 	ixl_netmap_attach(vsi);
722 #endif /* DEV_NETMAP */
723 	INIT_DEBUGOUT("ixl_attach: end");
724 	return (0);
725 
726 err_late:
727 	if (vsi->ifp != NULL)
728 		if_free(vsi->ifp);
729 err_mac_hmc:
730 	i40e_shutdown_lan_hmc(hw);
731 err_get_cap:
732 	i40e_shutdown_adminq(hw);
733 err_out:
734 	ixl_free_pci_resources(pf);
735 	ixl_free_vsi(vsi);
736 	IXL_PF_LOCK_DESTROY(pf);
737 	return (error);
738 }
739 
740 /*********************************************************************
741  *  Device removal routine
742  *
743  *  The detach entry point is called when the driver is being removed.
744  *  This routine stops the adapter and deallocates all the resources
745  *  that were allocated for driver operation.
746  *
747  *  return 0 on success, positive on failure
748  *********************************************************************/
749 
750 static int
751 ixl_detach(device_t dev)
752 {
753 	struct ixl_pf		*pf = device_get_softc(dev);
754 	struct i40e_hw		*hw = &pf->hw;
755 	struct ixl_vsi		*vsi = &pf->vsi;
756 	i40e_status		status;
757 #ifdef PCI_IOV
758 	int			error;
759 #endif
760 
761 	INIT_DEBUGOUT("ixl_detach: begin");
762 
763 	/* Make sure VLANS are not using driver */
764 	if (vsi->ifp->if_vlantrunk != NULL) {
765 		device_printf(dev,"Vlan in use, detach first\n");
766 		return (EBUSY);
767 	}
768 
769 #ifdef PCI_IOV
770 	error = pci_iov_detach(dev);
771 	if (error != 0) {
772 		device_printf(dev, "SR-IOV in use; detach first.\n");
773 		return (error);
774 	}
775 #endif
776 
777 	ether_ifdetach(vsi->ifp);
778 	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
779 		IXL_PF_LOCK(pf);
780 		ixl_stop(pf);
781 		IXL_PF_UNLOCK(pf);
782 	}
783 
784 	ixl_free_taskqueues(pf);
785 
786 	/* Shutdown LAN HMC */
787 	status = i40e_shutdown_lan_hmc(hw);
788 	if (status)
789 		device_printf(dev,
790 		    "Shutdown LAN HMC failed with code %d\n", status);
791 
792 	/* Shutdown admin queue */
793 	status = i40e_shutdown_adminq(hw);
794 	if (status)
795 		device_printf(dev,
796 		    "Shutdown Admin queue failed with code %d\n", status);
797 
798 	/* Unregister VLAN events */
799 	if (vsi->vlan_attach != NULL)
800 		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
801 	if (vsi->vlan_detach != NULL)
802 		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
803 
804 	callout_drain(&pf->timer);
805 #ifdef DEV_NETMAP
806 	netmap_detach(vsi->ifp);
807 #endif /* DEV_NETMAP */
808 	ixl_free_pci_resources(pf);
809 	bus_generic_detach(dev);
810 	if_free(vsi->ifp);
811 	ixl_free_vsi(vsi);
812 	IXL_PF_LOCK_DESTROY(pf);
813 	return (0);
814 }
815 
816 /*********************************************************************
817  *
818  *  Shutdown entry point
819  *
820  **********************************************************************/
821 
822 static int
823 ixl_shutdown(device_t dev)
824 {
825 	struct ixl_pf *pf = device_get_softc(dev);
826 	IXL_PF_LOCK(pf);
827 	ixl_stop(pf);
828 	IXL_PF_UNLOCK(pf);
829 	return (0);
830 }
831 
832 
833 /*********************************************************************
834  *
835  *  Get the hardware capabilities
836  *
837  **********************************************************************/
838 
839 static int
840 ixl_get_hw_capabilities(struct ixl_pf *pf)
841 {
842 	struct i40e_aqc_list_capabilities_element_resp *buf;
843 	struct i40e_hw	*hw = &pf->hw;
844 	device_t 	dev = pf->dev;
845 	int             error, len;
846 	u16		needed;
847 	bool		again = TRUE;
848 
849 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
850 retry:
851 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
852 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
853 		device_printf(dev, "Unable to allocate cap memory\n");
854                 return (ENOMEM);
855 	}
856 
857 	/* This populates the hw struct */
858         error = i40e_aq_discover_capabilities(hw, buf, len,
859 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
860 	free(buf, M_DEVBUF);
861 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
862 	    (again == TRUE)) {
863 		/* retry once with a larger buffer */
864 		again = FALSE;
865 		len = needed;
866 		goto retry;
867 	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
868 		device_printf(dev, "capability discovery failed: %d\n",
869 		    pf->hw.aq.asq_last_status);
870 		return (ENODEV);
871 	}
872 
873 	/* Capture this PF's starting queue pair */
874 	pf->qbase = hw->func_caps.base_queue;
875 
876 #ifdef IXL_DEBUG
877 	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
878 	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
879 	    hw->pf_id, hw->func_caps.num_vfs,
880 	    hw->func_caps.num_msix_vectors,
881 	    hw->func_caps.num_msix_vectors_vf,
882 	    hw->func_caps.fd_filters_guaranteed,
883 	    hw->func_caps.fd_filters_best_effort,
884 	    hw->func_caps.num_tx_qp,
885 	    hw->func_caps.num_rx_qp,
886 	    hw->func_caps.base_queue);
887 #endif
888 	return (error);
889 }
890 
891 static void
892 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
893 {
894 	device_t 	dev = vsi->dev;
895 
896 	/* Enable/disable TXCSUM/TSO4 */
897 	if (!(ifp->if_capenable & IFCAP_TXCSUM)
898 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
899 		if (mask & IFCAP_TXCSUM) {
900 			ifp->if_capenable |= IFCAP_TXCSUM;
901 			/* enable TXCSUM, restore TSO if previously enabled */
902 			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
903 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
904 				ifp->if_capenable |= IFCAP_TSO4;
905 			}
906 		}
907 		else if (mask & IFCAP_TSO4) {
908 			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
909 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
910 			device_printf(dev,
911 			    "TSO4 requires txcsum, enabling both...\n");
912 		}
913 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
914 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
915 		if (mask & IFCAP_TXCSUM)
916 			ifp->if_capenable &= ~IFCAP_TXCSUM;
917 		else if (mask & IFCAP_TSO4)
918 			ifp->if_capenable |= IFCAP_TSO4;
919 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
920 	    && (ifp->if_capenable & IFCAP_TSO4)) {
921 		if (mask & IFCAP_TXCSUM) {
922 			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
923 			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
924 			device_printf(dev,
925 			    "TSO4 requires txcsum, disabling both...\n");
926 		} else if (mask & IFCAP_TSO4)
927 			ifp->if_capenable &= ~IFCAP_TSO4;
928 	}
929 
930 	/* Enable/disable TXCSUM_IPV6/TSO6 */
931 	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
932 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
933 		if (mask & IFCAP_TXCSUM_IPV6) {
934 			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
935 			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
936 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
937 				ifp->if_capenable |= IFCAP_TSO6;
938 			}
939 		} else if (mask & IFCAP_TSO6) {
940 			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
941 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
942 			device_printf(dev,
943 			    "TSO6 requires txcsum6, enabling both...\n");
944 		}
945 	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
946 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
947 		if (mask & IFCAP_TXCSUM_IPV6)
948 			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
949 		else if (mask & IFCAP_TSO6)
950 			ifp->if_capenable |= IFCAP_TSO6;
951 	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
952 	    && (ifp->if_capenable & IFCAP_TSO6)) {
953 		if (mask & IFCAP_TXCSUM_IPV6) {
954 			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
955 			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
956 			device_printf(dev,
957 			    "TSO6 requires txcsum6, disabling both...\n");
958 		} else if (mask & IFCAP_TSO6)
959 			ifp->if_capenable &= ~IFCAP_TSO6;
960 	}
961 }
962 
963 /*********************************************************************
964  *  Ioctl entry point
965  *
966  *  ixl_ioctl is called when the user wants to configure the
967  *  interface.
968  *
969  *  return 0 on success, positive on failure
970  **********************************************************************/
971 
972 static int
973 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
974 {
975 	struct ixl_vsi	*vsi = ifp->if_softc;
976 	struct ixl_pf	*pf = vsi->back;
977 	struct ifreq	*ifr = (struct ifreq *) data;
978 #if defined(INET) || defined(INET6)
979 	struct ifaddr *ifa = (struct ifaddr *)data;
980 	bool		avoid_reset = FALSE;
981 #endif
982 	int             error = 0;
983 
984 	switch (command) {
985 
986         case SIOCSIFADDR:
987 #ifdef INET
988 		if (ifa->ifa_addr->sa_family == AF_INET)
989 			avoid_reset = TRUE;
990 #endif
991 #ifdef INET6
992 		if (ifa->ifa_addr->sa_family == AF_INET6)
993 			avoid_reset = TRUE;
994 #endif
995 #if defined(INET) || defined(INET6)
996 		/*
997 		** Calling init results in link renegotiation,
998 		** so we avoid doing it when possible.
999 		*/
1000 		if (avoid_reset) {
1001 			ifp->if_flags |= IFF_UP;
1002 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1003 				ixl_init(pf);
1004 #ifdef INET
1005 			if (!(ifp->if_flags & IFF_NOARP))
1006 				arp_ifinit(ifp, ifa);
1007 #endif
1008 		} else
1009 			error = ether_ioctl(ifp, command, data);
1010 		break;
1011 #endif
1012 	case SIOCSIFMTU:
1013 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1014 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
1015 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1016 			error = EINVAL;
1017 		} else {
1018 			IXL_PF_LOCK(pf);
1019 			ifp->if_mtu = ifr->ifr_mtu;
1020 			vsi->max_frame_size =
1021 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1022 			    + ETHER_VLAN_ENCAP_LEN;
1023 			ixl_init_locked(pf);
1024 			IXL_PF_UNLOCK(pf);
1025 		}
1026 		break;
1027 	case SIOCSIFFLAGS:
1028 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1029 		IXL_PF_LOCK(pf);
1030 		if (ifp->if_flags & IFF_UP) {
1031 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1032 				if ((ifp->if_flags ^ pf->if_flags) &
1033 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1034 					ixl_set_promisc(vsi);
1035 				}
1036 			} else
1037 				ixl_init_locked(pf);
1038 		} else
1039 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1040 				ixl_stop(pf);
1041 		pf->if_flags = ifp->if_flags;
1042 		IXL_PF_UNLOCK(pf);
1043 		break;
1044 	case SIOCADDMULTI:
1045 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1046 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1047 			IXL_PF_LOCK(pf);
1048 			ixl_disable_intr(vsi);
1049 			ixl_add_multi(vsi);
1050 			ixl_enable_intr(vsi);
1051 			IXL_PF_UNLOCK(pf);
1052 		}
1053 		break;
1054 	case SIOCDELMULTI:
1055 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1056 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1057 			IXL_PF_LOCK(pf);
1058 			ixl_disable_intr(vsi);
1059 			ixl_del_multi(vsi);
1060 			ixl_enable_intr(vsi);
1061 			IXL_PF_UNLOCK(pf);
1062 		}
1063 		break;
1064 	case SIOCSIFMEDIA:
1065 	case SIOCGIFMEDIA:
1066 #ifdef IFM_ETH_XTYPE
1067 	case SIOCGIFXMEDIA:
1068 #endif
1069 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1070 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1071 		break;
1072 	case SIOCSIFCAP:
1073 	{
1074 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1075 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1076 
1077 		ixl_cap_txcsum_tso(vsi, ifp, mask);
1078 
1079 		if (mask & IFCAP_RXCSUM)
1080 			ifp->if_capenable ^= IFCAP_RXCSUM;
1081 		if (mask & IFCAP_RXCSUM_IPV6)
1082 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1083 		if (mask & IFCAP_LRO)
1084 			ifp->if_capenable ^= IFCAP_LRO;
1085 		if (mask & IFCAP_VLAN_HWTAGGING)
1086 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1087 		if (mask & IFCAP_VLAN_HWFILTER)
1088 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1089 		if (mask & IFCAP_VLAN_HWTSO)
1090 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1091 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1092 			IXL_PF_LOCK(pf);
1093 			ixl_init_locked(pf);
1094 			IXL_PF_UNLOCK(pf);
1095 		}
1096 		VLAN_CAPABILITIES(ifp);
1097 
1098 		break;
1099 	}
1100 
1101 	default:
1102 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1103 		error = ether_ioctl(ifp, command, data);
1104 		break;
1105 	}
1106 
1107 	return (error);
1108 }
1109 
1110 
1111 /*********************************************************************
1112  *  Init entry point
1113  *
1114  *  This routine is used in two ways. It is used by the stack as
1115  *  init entry point in network interface structure. It is also used
1116  *  by the driver as a hw/sw initialization routine to get to a
1117  *  consistent state.
1118  *
1119  *  return 0 on success, positive on failure
1120  **********************************************************************/
1121 
1122 static void
1123 ixl_init_locked(struct ixl_pf *pf)
1124 {
1125 	struct i40e_hw	*hw = &pf->hw;
1126 	struct ixl_vsi	*vsi = &pf->vsi;
1127 	struct ifnet	*ifp = vsi->ifp;
1128 	device_t 	dev = pf->dev;
1129 	struct i40e_filter_control_settings	filter;
1130 	u8		tmpaddr[ETHER_ADDR_LEN];
1131 	int		ret;
1132 
1133 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1134 	INIT_DEBUGOUT("ixl_init: begin");
1135 	ixl_stop(pf);
1136 
1137 	/* Get the latest mac address... User might use a LAA */
1138 	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1139 	      I40E_ETH_LENGTH_OF_ADDRESS);
1140 	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1141 	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
1142 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1143 		bcopy(tmpaddr, hw->mac.addr,
1144 		    I40E_ETH_LENGTH_OF_ADDRESS);
1145 		ret = i40e_aq_mac_address_write(hw,
1146 		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1147 		    hw->mac.addr, NULL);
1148 		if (ret) {
1149 			device_printf(dev, "LLA address"
1150 			 "change failed!!\n");
1151 			return;
1152 		} else {
1153 			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1154 		}
1155 	}
1156 
1157 	/* Set the various hardware offload abilities */
1158 	ifp->if_hwassist = 0;
1159 	if (ifp->if_capenable & IFCAP_TSO)
1160 		ifp->if_hwassist |= CSUM_TSO;
1161 	if (ifp->if_capenable & IFCAP_TXCSUM)
1162 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1163 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1164 		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1165 
1166 	/* Set up the device filtering */
1167 	bzero(&filter, sizeof(filter));
1168 	filter.enable_ethtype = TRUE;
1169 	filter.enable_macvlan = TRUE;
1170 #ifdef IXL_FDIR
1171 	filter.enable_fdir = TRUE;
1172 #endif
1173 	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
1174 	if (i40e_set_filter_control(hw, &filter))
1175 		device_printf(dev, "set_filter_control() failed\n");
1176 
1177 	/* Set up RSS */
1178 	ixl_config_rss(vsi);
1179 
1180 	/*
1181 	** Prepare the VSI: rings, hmc contexts, etc...
1182 	*/
1183 	if (ixl_initialize_vsi(vsi)) {
1184 		device_printf(dev, "initialize vsi failed!!\n");
1185 		return;
1186 	}
1187 
1188 	/* Add protocol filters to list */
1189 	ixl_init_filters(vsi);
1190 
1191 	/* Setup vlan's if needed */
1192 	ixl_setup_vlan_filters(vsi);
1193 
1194 	/* Start the local timer */
1195 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1196 
1197 	/* Set up MSI/X routing and the ITR settings */
1198 	if (ixl_enable_msix) {
1199 		ixl_configure_msix(pf);
1200 		ixl_configure_itr(pf);
1201 	} else
1202 		ixl_configure_legacy(pf);
1203 
1204 	ixl_enable_rings(vsi);
1205 
1206 	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1207 
1208 	ixl_reconfigure_filters(vsi);
1209 
1210 	/* Set MTU in hardware*/
1211 	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1212 	    TRUE, 0, NULL);
1213 	if (aq_error)
1214 		device_printf(vsi->dev,
1215 			"aq_set_mac_config in init error, code %d\n",
1216 		    aq_error);
1217 
1218 	/* And now turn on interrupts */
1219 	ixl_enable_intr(vsi);
1220 
1221 	/* Now inform the stack we're ready */
1222 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1223 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1224 
1225 	return;
1226 }
1227 
1228 static void
1229 ixl_init(void *arg)
1230 {
1231 	struct ixl_pf *pf = arg;
1232 
1233 	IXL_PF_LOCK(pf);
1234 	ixl_init_locked(pf);
1235 	IXL_PF_UNLOCK(pf);
1236 	return;
1237 }
1238 
1239 /*
1240 **
1241 ** MSIX Interrupt Handlers and Tasklets
1242 **
1243 */
1244 static void
1245 ixl_handle_que(void *context, int pending)
1246 {
1247 	struct ixl_queue *que = context;
1248 	struct ixl_vsi *vsi = que->vsi;
1249 	struct i40e_hw  *hw = vsi->hw;
1250 	struct tx_ring  *txr = &que->txr;
1251 	struct ifnet    *ifp = vsi->ifp;
1252 	bool		more;
1253 
1254 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1255 		more = ixl_rxeof(que, IXL_RX_LIMIT);
1256 		IXL_TX_LOCK(txr);
1257 		ixl_txeof(que);
1258 		if (!drbr_empty(ifp, txr->br))
1259 			ixl_mq_start_locked(ifp, txr);
1260 		IXL_TX_UNLOCK(txr);
1261 		if (more) {
1262 			taskqueue_enqueue(que->tq, &que->task);
1263 			return;
1264 		}
1265 	}
1266 
1267 	/* Reenable this interrupt - hmmm */
1268 	ixl_enable_queue(hw, que->me);
1269 	return;
1270 }
1271 
1272 
1273 /*********************************************************************
1274  *
1275  *  Legacy Interrupt Service routine
1276  *
1277  **********************************************************************/
1278 void
1279 ixl_intr(void *arg)
1280 {
1281 	struct ixl_pf		*pf = arg;
1282 	struct i40e_hw		*hw =  &pf->hw;
1283 	struct ixl_vsi		*vsi = &pf->vsi;
1284 	struct ixl_queue	*que = vsi->queues;
1285 	struct ifnet		*ifp = vsi->ifp;
1286 	struct tx_ring		*txr = &que->txr;
1287         u32			reg, icr0, mask;
1288 	bool			more_tx, more_rx;
1289 
1290 	++que->irqs;
1291 
1292 	/* Protect against spurious interrupts */
1293 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1294 		return;
1295 
1296 	icr0 = rd32(hw, I40E_PFINT_ICR0);
1297 
1298 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1299 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1300 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1301 
1302         mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1303 
1304 #ifdef PCI_IOV
1305 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1306 		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1307 #endif
1308 
1309 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1310 		taskqueue_enqueue(pf->tq, &pf->adminq);
1311 		return;
1312 	}
1313 
1314 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1315 
1316 	IXL_TX_LOCK(txr);
1317 	more_tx = ixl_txeof(que);
1318 	if (!drbr_empty(vsi->ifp, txr->br))
1319 		more_tx = 1;
1320 	IXL_TX_UNLOCK(txr);
1321 
1322 	/* re-enable other interrupt causes */
1323 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1324 
1325 	/* And now the queues */
1326 	reg = rd32(hw, I40E_QINT_RQCTL(0));
1327 	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1328 	wr32(hw, I40E_QINT_RQCTL(0), reg);
1329 
1330 	reg = rd32(hw, I40E_QINT_TQCTL(0));
1331 	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1332 	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1333 	wr32(hw, I40E_QINT_TQCTL(0), reg);
1334 
1335 	ixl_enable_legacy(hw);
1336 
1337 	return;
1338 }
1339 
1340 
1341 /*********************************************************************
1342  *
1343  *  MSIX VSI Interrupt Service routine
1344  *
1345  **********************************************************************/
1346 void
1347 ixl_msix_que(void *arg)
1348 {
1349 	struct ixl_queue	*que = arg;
1350 	struct ixl_vsi	*vsi = que->vsi;
1351 	struct i40e_hw	*hw = vsi->hw;
1352 	struct tx_ring	*txr = &que->txr;
1353 	bool		more_tx, more_rx;
1354 
1355 	/* Protect against spurious interrupts */
1356 	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1357 		return;
1358 
1359 	++que->irqs;
1360 
1361 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1362 
1363 	IXL_TX_LOCK(txr);
1364 	more_tx = ixl_txeof(que);
1365 	/*
1366 	** Make certain that if the stack
1367 	** has anything queued the task gets
1368 	** scheduled to handle it.
1369 	*/
1370 	if (!drbr_empty(vsi->ifp, txr->br))
1371 		more_tx = 1;
1372 	IXL_TX_UNLOCK(txr);
1373 
1374 	ixl_set_queue_rx_itr(que);
1375 	ixl_set_queue_tx_itr(que);
1376 
1377 	if (more_tx || more_rx)
1378 		taskqueue_enqueue(que->tq, &que->task);
1379 	else
1380 		ixl_enable_queue(hw, que->me);
1381 
1382 	return;
1383 }
1384 
1385 
1386 /*********************************************************************
1387  *
1388  *  MSIX Admin Queue Interrupt Service routine
1389  *
1390  **********************************************************************/
1391 static void
1392 ixl_msix_adminq(void *arg)
1393 {
1394 	struct ixl_pf	*pf = arg;
1395 	struct i40e_hw	*hw = &pf->hw;
1396 	u32		reg, mask;
1397 
1398 	++pf->admin_irq;
1399 
1400 	reg = rd32(hw, I40E_PFINT_ICR0);
1401 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1402 
1403 	/* Check on the cause */
1404 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1405 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1406 
1407 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1408 		ixl_handle_mdd_event(pf);
1409 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1410 	}
1411 
1412 #ifdef PCI_IOV
1413 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1414 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1415 		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1416 	}
1417 #endif
1418 
1419 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1420 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1421 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1422 
1423 	taskqueue_enqueue(pf->tq, &pf->adminq);
1424 	return;
1425 }
1426 
1427 /*********************************************************************
1428  *
1429  *  Media Ioctl callback
1430  *
1431  *  This routine is called whenever the user queries the status of
1432  *  the interface using ifconfig.
1433  *
1434  **********************************************************************/
1435 static void
1436 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1437 {
1438 	struct ixl_vsi	*vsi = ifp->if_softc;
1439 	struct ixl_pf	*pf = vsi->back;
1440 	struct i40e_hw  *hw = &pf->hw;
1441 
1442 	INIT_DEBUGOUT("ixl_media_status: begin");
1443 	IXL_PF_LOCK(pf);
1444 
1445 	hw->phy.get_link_info = TRUE;
1446 	i40e_get_link_status(hw, &pf->link_up);
1447 	ixl_update_link_status(pf);
1448 
1449 	ifmr->ifm_status = IFM_AVALID;
1450 	ifmr->ifm_active = IFM_ETHER;
1451 
1452 	if (!pf->link_up) {
1453 		IXL_PF_UNLOCK(pf);
1454 		return;
1455 	}
1456 
1457 	ifmr->ifm_status |= IFM_ACTIVE;
1458 	/* Hardware is always full-duplex */
1459 	ifmr->ifm_active |= IFM_FDX;
1460 
1461 	switch (hw->phy.link_info.phy_type) {
1462 		/* 100 M */
1463 		case I40E_PHY_TYPE_100BASE_TX:
1464 			ifmr->ifm_active |= IFM_100_TX;
1465 			break;
1466 		/* 1 G */
1467 		case I40E_PHY_TYPE_1000BASE_T:
1468 			ifmr->ifm_active |= IFM_1000_T;
1469 			break;
1470 		case I40E_PHY_TYPE_1000BASE_SX:
1471 			ifmr->ifm_active |= IFM_1000_SX;
1472 			break;
1473 		case I40E_PHY_TYPE_1000BASE_LX:
1474 			ifmr->ifm_active |= IFM_1000_LX;
1475 			break;
1476 		/* 10 G */
1477 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1478 			ifmr->ifm_active |= IFM_10G_TWINAX;
1479 			break;
1480 		case I40E_PHY_TYPE_10GBASE_SR:
1481 			ifmr->ifm_active |= IFM_10G_SR;
1482 			break;
1483 		case I40E_PHY_TYPE_10GBASE_LR:
1484 			ifmr->ifm_active |= IFM_10G_LR;
1485 			break;
1486 		case I40E_PHY_TYPE_10GBASE_T:
1487 			ifmr->ifm_active |= IFM_10G_T;
1488 			break;
1489 		/* 40 G */
1490 		case I40E_PHY_TYPE_40GBASE_CR4:
1491 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1492 			ifmr->ifm_active |= IFM_40G_CR4;
1493 			break;
1494 		case I40E_PHY_TYPE_40GBASE_SR4:
1495 			ifmr->ifm_active |= IFM_40G_SR4;
1496 			break;
1497 		case I40E_PHY_TYPE_40GBASE_LR4:
1498 			ifmr->ifm_active |= IFM_40G_LR4;
1499 			break;
1500 #ifndef IFM_ETH_XTYPE
1501 		case I40E_PHY_TYPE_1000BASE_KX:
1502 			ifmr->ifm_active |= IFM_1000_CX;
1503 			break;
1504 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1505 		case I40E_PHY_TYPE_10GBASE_CR1:
1506 			ifmr->ifm_active |= IFM_10G_TWINAX;
1507 			break;
1508 		case I40E_PHY_TYPE_10GBASE_KX4:
1509 			ifmr->ifm_active |= IFM_10G_CX4;
1510 			break;
1511 		case I40E_PHY_TYPE_10GBASE_KR:
1512 			ifmr->ifm_active |= IFM_10G_SR;
1513 			break;
1514 		case I40E_PHY_TYPE_40GBASE_KR4:
1515 		case I40E_PHY_TYPE_XLPPI:
1516 			ifmr->ifm_active |= IFM_40G_SR4;
1517 			break;
1518 #else
1519 		case I40E_PHY_TYPE_1000BASE_KX:
1520 			ifmr->ifm_active |= IFM_1000_KX;
1521 			break;
1522 		/* ERJ: What's the difference between these? */
1523 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1524 		case I40E_PHY_TYPE_10GBASE_CR1:
1525 			ifmr->ifm_active |= IFM_10G_CR1;
1526 			break;
1527 		case I40E_PHY_TYPE_10GBASE_KX4:
1528 			ifmr->ifm_active |= IFM_10G_KX4;
1529 			break;
1530 		case I40E_PHY_TYPE_10GBASE_KR:
1531 			ifmr->ifm_active |= IFM_10G_KR;
1532 			break;
1533 		case I40E_PHY_TYPE_20GBASE_KR2:
1534 			ifmr->ifm_active |= IFM_20G_KR2;
1535 			break;
1536 		case I40E_PHY_TYPE_40GBASE_KR4:
1537 			ifmr->ifm_active |= IFM_40G_KR4;
1538 			break;
1539 		case I40E_PHY_TYPE_XLPPI:
1540 			ifmr->ifm_active |= IFM_40G_XLPPI;
1541 			break;
1542 #endif
1543 		default:
1544 			ifmr->ifm_active |= IFM_UNKNOWN;
1545 			break;
1546 	}
1547 	/* Report flow control status as well */
1548 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1549 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1550 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1551 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1552 
1553 	IXL_PF_UNLOCK(pf);
1554 
1555 	return;
1556 }
1557 
1558 /*********************************************************************
1559  *
1560  *  Media Ioctl callback
1561  *
1562  *  This routine is called when the user changes speed/duplex using
1563  *  media/mediopt option with ifconfig.
1564  *
1565  **********************************************************************/
1566 static int
1567 ixl_media_change(struct ifnet * ifp)
1568 {
1569 	struct ixl_vsi *vsi = ifp->if_softc;
1570 	struct ifmedia *ifm = &vsi->media;
1571 
1572 	INIT_DEBUGOUT("ixl_media_change: begin");
1573 
1574 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1575 		return (EINVAL);
1576 
1577 	if_printf(ifp, "Media change is currently not supported.\n");
1578 
1579 	return (ENODEV);
1580 }
1581 
1582 
1583 #ifdef IXL_FDIR
1584 /*
1585 ** ATR: Application Targetted Receive - creates a filter
1586 **	based on TX flow info that will keep the receive
1587 **	portion of the flow on the same queue. Based on the
1588 **	implementation this is only available for TCP connections
1589 */
1590 void
1591 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1592 {
1593 	struct ixl_vsi			*vsi = que->vsi;
1594 	struct tx_ring			*txr = &que->txr;
1595 	struct i40e_filter_program_desc	*FDIR;
1596 	u32				ptype, dtype;
1597 	int				idx;
1598 
1599 	/* check if ATR is enabled and sample rate */
1600 	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1601 		return;
1602 	/*
1603 	** We sample all TCP SYN/FIN packets,
1604 	** or at the selected sample rate
1605 	*/
1606 	txr->atr_count++;
1607 	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1608 	    (txr->atr_count < txr->atr_rate))
1609                 return;
1610 	txr->atr_count = 0;
1611 
1612 	/* Get a descriptor to use */
1613 	idx = txr->next_avail;
1614 	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1615 	if (++idx == que->num_desc)
1616 		idx = 0;
1617 	txr->avail--;
1618 	txr->next_avail = idx;
1619 
1620 	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1621 	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1622 
1623 	ptype |= (etype == ETHERTYPE_IP) ?
1624 	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1625 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1626 	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1627 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1628 
1629 	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1630 
1631 	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1632 
1633 	/*
1634 	** We use the TCP TH_FIN as a trigger to remove
1635 	** the filter, otherwise its an update.
1636 	*/
1637 	dtype |= (th->th_flags & TH_FIN) ?
1638 	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1639 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1640 	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1641 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1642 
1643 	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1644 	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1645 
1646 	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1647 	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1648 
1649 	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1650 	FDIR->dtype_cmd_cntindex = htole32(dtype);
1651 	return;
1652 }
1653 #endif
1654 
1655 
1656 static void
1657 ixl_set_promisc(struct ixl_vsi *vsi)
1658 {
1659 	struct ifnet	*ifp = vsi->ifp;
1660 	struct i40e_hw	*hw = vsi->hw;
1661 	int		err, mcnt = 0;
1662 	bool		uni = FALSE, multi = FALSE;
1663 
1664 	if (ifp->if_flags & IFF_ALLMULTI)
1665                 multi = TRUE;
1666 	else { /* Need to count the multicast addresses */
1667 		struct  ifmultiaddr *ifma;
1668 		if_maddr_rlock(ifp);
1669 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1670                         if (ifma->ifma_addr->sa_family != AF_LINK)
1671                                 continue;
1672                         if (mcnt == MAX_MULTICAST_ADDR)
1673                                 break;
1674                         mcnt++;
1675 		}
1676 		if_maddr_runlock(ifp);
1677 	}
1678 
1679 	if (mcnt >= MAX_MULTICAST_ADDR)
1680                 multi = TRUE;
1681         if (ifp->if_flags & IFF_PROMISC)
1682 		uni = TRUE;
1683 
1684 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1685 	    vsi->seid, uni, NULL);
1686 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1687 	    vsi->seid, multi, NULL);
1688 	return;
1689 }
1690 
1691 /*********************************************************************
1692  * 	Filter Routines
1693  *
1694  *	Routines for multicast and vlan filter management.
1695  *
1696  *********************************************************************/
1697 static void
1698 ixl_add_multi(struct ixl_vsi *vsi)
1699 {
1700 	struct	ifmultiaddr	*ifma;
1701 	struct ifnet		*ifp = vsi->ifp;
1702 	struct i40e_hw		*hw = vsi->hw;
1703 	int			mcnt = 0, flags;
1704 
1705 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1706 
1707 	if_maddr_rlock(ifp);
1708 	/*
1709 	** First just get a count, to decide if we
1710 	** we simply use multicast promiscuous.
1711 	*/
1712 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1713 		if (ifma->ifma_addr->sa_family != AF_LINK)
1714 			continue;
1715 		mcnt++;
1716 	}
1717 	if_maddr_runlock(ifp);
1718 
1719 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1720 		/* delete existing MC filters */
1721 		ixl_del_hw_filters(vsi, mcnt);
1722 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1723 		    vsi->seid, TRUE, NULL);
1724 		return;
1725 	}
1726 
1727 	mcnt = 0;
1728 	if_maddr_rlock(ifp);
1729 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1730 		if (ifma->ifma_addr->sa_family != AF_LINK)
1731 			continue;
1732 		ixl_add_mc_filter(vsi,
1733 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1734 		mcnt++;
1735 	}
1736 	if_maddr_runlock(ifp);
1737 	if (mcnt > 0) {
1738 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1739 		ixl_add_hw_filters(vsi, flags, mcnt);
1740 	}
1741 
1742 	IOCTL_DEBUGOUT("ixl_add_multi: end");
1743 	return;
1744 }
1745 
1746 static void
1747 ixl_del_multi(struct ixl_vsi *vsi)
1748 {
1749 	struct ifnet		*ifp = vsi->ifp;
1750 	struct ifmultiaddr	*ifma;
1751 	struct ixl_mac_filter	*f;
1752 	int			mcnt = 0;
1753 	bool		match = FALSE;
1754 
1755 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1756 
1757 	/* Search for removed multicast addresses */
1758 	if_maddr_rlock(ifp);
1759 	SLIST_FOREACH(f, &vsi->ftl, next) {
1760 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1761 			match = FALSE;
1762 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1763 				if (ifma->ifma_addr->sa_family != AF_LINK)
1764 					continue;
1765 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1766 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1767 					match = TRUE;
1768 					break;
1769 				}
1770 			}
1771 			if (match == FALSE) {
1772 				f->flags |= IXL_FILTER_DEL;
1773 				mcnt++;
1774 			}
1775 		}
1776 	}
1777 	if_maddr_runlock(ifp);
1778 
1779 	if (mcnt > 0)
1780 		ixl_del_hw_filters(vsi, mcnt);
1781 }
1782 
1783 
1784 /*********************************************************************
1785  *  Timer routine
1786  *
1787  *  This routine checks for link status,updates statistics,
1788  *  and runs the watchdog check.
1789  *
1790  **********************************************************************/
1791 
1792 static void
1793 ixl_local_timer(void *arg)
1794 {
1795 	struct ixl_pf		*pf = arg;
1796 	struct i40e_hw		*hw = &pf->hw;
1797 	struct ixl_vsi		*vsi = &pf->vsi;
1798 	struct ixl_queue	*que = vsi->queues;
1799 	device_t		dev = pf->dev;
1800 	int			hung = 0;
1801 	u32			mask;
1802 
1803 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1804 
1805 	/* Fire off the adminq task */
1806 	taskqueue_enqueue(pf->tq, &pf->adminq);
1807 
1808 	/* Update stats */
1809 	ixl_update_stats_counters(pf);
1810 
1811 	/*
1812 	** Check status of the queues
1813 	*/
1814 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1815 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1816 
1817 	for (int i = 0; i < vsi->num_queues; i++,que++) {
1818 		/* Any queues with outstanding work get a sw irq */
1819 		if (que->busy)
1820 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1821 		/*
1822 		** Each time txeof runs without cleaning, but there
1823 		** are uncleaned descriptors it increments busy. If
1824 		** we get to 5 we declare it hung.
1825 		*/
1826 		if (que->busy == IXL_QUEUE_HUNG) {
1827 			++hung;
1828 			/* Mark the queue as inactive */
1829 			vsi->active_queues &= ~((u64)1 << que->me);
1830 			continue;
1831 		} else {
1832 			/* Check if we've come back from hung */
1833 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1834 				vsi->active_queues |= ((u64)1 << que->me);
1835 		}
1836 		if (que->busy >= IXL_MAX_TX_BUSY) {
1837 #ifdef IXL_DEBUG
1838 			device_printf(dev,"Warning queue %d "
1839 			    "appears to be hung!\n", i);
1840 #endif
1841 			que->busy = IXL_QUEUE_HUNG;
1842 			++hung;
1843 		}
1844 	}
1845 	/* Only reinit if all queues show hung */
1846 	if (hung == vsi->num_queues)
1847 		goto hung;
1848 
1849 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1850 	return;
1851 
1852 hung:
1853 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1854 	ixl_init_locked(pf);
1855 }
1856 
1857 /*
1858 ** Note: this routine updates the OS on the link state
1859 **	the real check of the hardware only happens with
1860 **	a link interrupt.
1861 */
1862 static void
1863 ixl_update_link_status(struct ixl_pf *pf)
1864 {
1865 	struct ixl_vsi		*vsi = &pf->vsi;
1866 	struct i40e_hw		*hw = &pf->hw;
1867 	struct ifnet		*ifp = vsi->ifp;
1868 	device_t		dev = pf->dev;
1869 
1870 	if (pf->link_up){
1871 		if (vsi->link_active == FALSE) {
1872 			pf->fc = hw->fc.current_mode;
1873 			if (bootverbose) {
1874 				device_printf(dev,"Link is up %d Gbps %s,"
1875 				    " Flow Control: %s\n",
1876 				    ((pf->link_speed ==
1877 				    I40E_LINK_SPEED_40GB)? 40:10),
1878 				    "Full Duplex", ixl_fc_string[pf->fc]);
1879 			}
1880 			vsi->link_active = TRUE;
1881 			/*
1882 			** Warn user if link speed on NPAR enabled
1883 			** partition is not at least 10GB
1884 			*/
1885 			if (hw->func_caps.npar_enable &&
1886 			   (hw->phy.link_info.link_speed ==
1887 			   I40E_LINK_SPEED_1GB ||
1888 			   hw->phy.link_info.link_speed ==
1889 			   I40E_LINK_SPEED_100MB))
1890 				device_printf(dev, "The partition detected"
1891 				    "link speed that is less than 10Gbps\n");
1892 			if_link_state_change(ifp, LINK_STATE_UP);
1893 		}
1894 	} else { /* Link down */
1895 		if (vsi->link_active == TRUE) {
1896 			if (bootverbose)
1897 				device_printf(dev,"Link is Down\n");
1898 			if_link_state_change(ifp, LINK_STATE_DOWN);
1899 			vsi->link_active = FALSE;
1900 		}
1901 	}
1902 
1903 	return;
1904 }
1905 
1906 /*********************************************************************
1907  *
1908  *  This routine disables all traffic on the adapter by issuing a
1909  *  global reset on the MAC and deallocates TX/RX buffers.
1910  *
1911  **********************************************************************/
1912 
1913 static void
1914 ixl_stop(struct ixl_pf *pf)
1915 {
1916 	struct ixl_vsi	*vsi = &pf->vsi;
1917 	struct ifnet	*ifp = vsi->ifp;
1918 
1919 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1920 
1921 	INIT_DEBUGOUT("ixl_stop: begin\n");
1922 	if (pf->num_vfs == 0)
1923 		ixl_disable_intr(vsi);
1924 	else
1925 		ixl_disable_rings_intr(vsi);
1926 	ixl_disable_rings(vsi);
1927 
1928 	/* Tell the stack that the interface is no longer active */
1929 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1930 
1931 	/* Stop the local timer */
1932 	callout_stop(&pf->timer);
1933 
1934 	return;
1935 }
1936 
1937 
1938 /*********************************************************************
1939  *
1940  *  Setup MSIX Interrupt resources and handlers for the VSI
1941  *
1942  **********************************************************************/
1943 static int
1944 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1945 {
1946 	device_t        dev = pf->dev;
1947 	struct 		ixl_vsi *vsi = &pf->vsi;
1948 	struct		ixl_queue *que = vsi->queues;
1949 	int 		error, rid = 0;
1950 
1951 	if (pf->msix == 1)
1952 		rid = 1;
1953 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1954 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1955 	if (pf->res == NULL) {
1956 		device_printf(dev,"Unable to allocate"
1957 		    " bus resource: vsi legacy/msi interrupt\n");
1958 		return (ENXIO);
1959 	}
1960 
1961 	/* Set the handler function */
1962 	error = bus_setup_intr(dev, pf->res,
1963 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1964 	    ixl_intr, pf, &pf->tag);
1965 	if (error) {
1966 		pf->res = NULL;
1967 		device_printf(dev, "Failed to register legacy/msi handler");
1968 		return (error);
1969 	}
1970 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1971 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1972 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1973 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1974 	    taskqueue_thread_enqueue, &que->tq);
1975 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1976 	    device_get_nameunit(dev));
1977 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1978 
1979 #ifdef PCI_IOV
1980 	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1981 #endif
1982 
1983 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1984 	    taskqueue_thread_enqueue, &pf->tq);
1985 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1986 	    device_get_nameunit(dev));
1987 
1988 	return (0);
1989 }
1990 
1991 static void
1992 ixl_init_taskqueues(struct ixl_pf *pf)
1993 {
1994        struct ixl_vsi *vsi = &pf->vsi;
1995        struct ixl_queue *que = vsi->queues;
1996        device_t dev = pf->dev;
1997 
1998        /* Tasklet for Admin Queue */
1999        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2000 #ifdef PCI_IOV
2001        /* VFLR Tasklet */
2002        TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2003 #endif
2004 
2005        /* Create and start PF taskqueue */
2006        pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2007            taskqueue_thread_enqueue, &pf->tq);
2008        taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2009            device_get_nameunit(dev));
2010 
2011        /* Create queue tasks and start queue taskqueues */
2012        for (int i = 0; i < vsi->num_queues; i++, que++) {
2013                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2014                TASK_INIT(&que->task, 0, ixl_handle_que, que);
2015                que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2016                    taskqueue_thread_enqueue, &que->tq);
2017 #ifdef RSS
2018                CPU_SETOF(cpu_id, &cpu_mask);
2019                taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2020                    &cpu_mask, "%s (bucket %d)",
2021                    device_get_nameunit(dev), cpu_id);
2022 #else
2023                taskqueue_start_threads(&que->tq, 1, PI_NET,
2024                    "%s (que %d)", device_get_nameunit(dev), que->me);
2025 #endif
2026        }
2027 
2028 }
2029 
2030 static void
2031 ixl_free_taskqueues(struct ixl_pf *pf)
2032 {
2033        struct ixl_vsi          *vsi = &pf->vsi;
2034        struct ixl_queue        *que = vsi->queues;
2035 
2036        if (pf->tq)
2037                taskqueue_free(pf->tq);
2038        for (int i = 0; i < vsi->num_queues; i++, que++) {
2039                if (que->tq)
2040                        taskqueue_free(que->tq);
2041        }
2042 }
2043 
2044 /*********************************************************************
2045  *
2046  *  Setup MSIX Interrupt resources and handlers for the VSI
2047  *
2048  **********************************************************************/
2049 static int
2050 ixl_assign_vsi_msix(struct ixl_pf *pf)
2051 {
2052 	device_t	dev = pf->dev;
2053 	struct 		ixl_vsi *vsi = &pf->vsi;
2054 	struct 		ixl_queue *que = vsi->queues;
2055 	struct		tx_ring	 *txr;
2056 	int 		error, rid, vector = 0;
2057 #ifdef	RSS
2058 	cpuset_t cpu_mask;
2059 #endif
2060 
2061 	/* Admin Que is vector 0*/
2062 	rid = vector + 1;
2063 	pf->res = bus_alloc_resource_any(dev,
2064     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2065 	if (!pf->res) {
2066 		device_printf(dev,"Unable to allocate"
2067     	    " bus resource: Adminq interrupt [%d]\n", rid);
2068 		return (ENXIO);
2069 	}
2070 	/* Set the adminq vector and handler */
2071 	error = bus_setup_intr(dev, pf->res,
2072 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2073 	    ixl_msix_adminq, pf, &pf->tag);
2074 	if (error) {
2075 		pf->res = NULL;
2076 		device_printf(dev, "Failed to register Admin que handler");
2077 		return (error);
2078 	}
2079 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2080 	pf->admvec = vector;
2081 	++vector;
2082 
2083 	/* Now set up the stations */
2084 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2085 		int cpu_id = i;
2086 		rid = vector + 1;
2087 		txr = &que->txr;
2088 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2089 		    RF_SHAREABLE | RF_ACTIVE);
2090 		if (que->res == NULL) {
2091 			device_printf(dev,"Unable to allocate"
2092 		    	    " bus resource: que interrupt [%d]\n", vector);
2093 			return (ENXIO);
2094 		}
2095 		/* Set the handler function */
2096 		error = bus_setup_intr(dev, que->res,
2097 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2098 		    ixl_msix_que, que, &que->tag);
2099 		if (error) {
2100 			que->res = NULL;
2101 			device_printf(dev, "Failed to register que handler");
2102 			return (error);
2103 		}
2104 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2105 		/* Bind the vector to a CPU */
2106 #ifdef RSS
2107 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2108 #endif
2109 		bus_bind_intr(dev, que->res, cpu_id);
2110 		que->msix = vector;
2111 	}
2112 
2113 	return (0);
2114 }
2115 
2116 
2117 /*
2118  * Allocate MSI/X vectors
2119  */
2120 static int
2121 ixl_init_msix(struct ixl_pf *pf)
2122 {
2123 	device_t dev = pf->dev;
2124 	int rid, want, vectors, queues, available;
2125 
2126 	/* Override by tuneable */
2127 	if (ixl_enable_msix == 0)
2128 		goto msi;
2129 
2130 	/*
2131 	** When used in a virtualized environment
2132 	** PCI BUSMASTER capability may not be set
2133 	** so explicity set it here and rewrite
2134 	** the ENABLE in the MSIX control register
2135 	** at this point to cause the host to
2136 	** successfully initialize us.
2137 	*/
2138 	{
2139 		u16 pci_cmd_word;
2140 		int msix_ctrl;
2141 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2142 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2143 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2144 		pci_find_cap(dev, PCIY_MSIX, &rid);
2145 		rid += PCIR_MSIX_CTRL;
2146 		msix_ctrl = pci_read_config(dev, rid, 2);
2147 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2148 		pci_write_config(dev, rid, msix_ctrl, 2);
2149 	}
2150 
2151 	/* First try MSI/X */
2152 	rid = PCIR_BAR(IXL_BAR);
2153 	pf->msix_mem = bus_alloc_resource_any(dev,
2154 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2155        	if (!pf->msix_mem) {
2156 		/* May not be enabled */
2157 		device_printf(pf->dev,
2158 		    "Unable to map MSIX table \n");
2159 		goto msi;
2160 	}
2161 
2162 	available = pci_msix_count(dev);
2163 	if (available == 0) { /* system has msix disabled */
2164 		bus_release_resource(dev, SYS_RES_MEMORY,
2165 		    rid, pf->msix_mem);
2166 		pf->msix_mem = NULL;
2167 		goto msi;
2168 	}
2169 
2170 	/* Figure out a reasonable auto config value */
2171 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2172 
2173 	/* Override with hardcoded value if it's less than autoconfig count */
2174 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2175 		queues = ixl_max_queues;
2176 	else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
2177 		device_printf(dev, "ixl_max_queues > # of cpus, using "
2178 		    "autoconfig amount...\n");
2179 	/* Or limit maximum auto-configured queues to 8 */
2180 	else if ((ixl_max_queues == 0) && (queues > 8))
2181 		queues = 8;
2182 
2183 #ifdef  RSS
2184 	/* If we're doing RSS, clamp at the number of RSS buckets */
2185 	if (queues > rss_getnumbuckets())
2186 		queues = rss_getnumbuckets();
2187 #endif
2188 
2189 	/*
2190 	** Want one vector (RX/TX pair) per queue
2191 	** plus an additional for the admin queue.
2192 	*/
2193 	want = queues + 1;
2194 	if (want <= available)	/* Have enough */
2195 		vectors = want;
2196 	else {
2197                	device_printf(pf->dev,
2198 		    "MSIX Configuration Problem, "
2199 		    "%d vectors available but %d wanted!\n",
2200 		    available, want);
2201 		return (0); /* Will go to Legacy setup */
2202 	}
2203 
2204 	if (pci_alloc_msix(dev, &vectors) == 0) {
2205                	device_printf(pf->dev,
2206 		    "Using MSIX interrupts with %d vectors\n", vectors);
2207 		pf->msix = vectors;
2208 		pf->vsi.num_queues = queues;
2209 #ifdef RSS
2210 		/*
2211 		 * If we're doing RSS, the number of queues needs to
2212 		 * match the number of RSS buckets that are configured.
2213 		 *
2214 		 * + If there's more queues than RSS buckets, we'll end
2215 		 *   up with queues that get no traffic.
2216 		 *
2217 		 * + If there's more RSS buckets than queues, we'll end
2218 		 *   up having multiple RSS buckets map to the same queue,
2219 		 *   so there'll be some contention.
2220 		 */
2221 		if (queues != rss_getnumbuckets()) {
2222 			device_printf(dev,
2223 			    "%s: queues (%d) != RSS buckets (%d)"
2224 			    "; performance will be impacted.\n",
2225 			    __func__, queues, rss_getnumbuckets());
2226 		}
2227 #endif
2228 		return (vectors);
2229 	}
2230 msi:
2231        	vectors = pci_msi_count(dev);
2232 	pf->vsi.num_queues = 1;
2233 	pf->msix = 1;
2234 	ixl_max_queues = 1;
2235 	ixl_enable_msix = 0;
2236        	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2237                	device_printf(pf->dev,"Using an MSI interrupt\n");
2238 	else {
2239 		pf->msix = 0;
2240                	device_printf(pf->dev,"Using a Legacy interrupt\n");
2241 	}
2242 	return (vectors);
2243 }
2244 
2245 
2246 /*
2247  * Plumb MSI/X vectors
2248  */
2249 static void
2250 ixl_configure_msix(struct ixl_pf *pf)
2251 {
2252 	struct i40e_hw	*hw = &pf->hw;
2253 	struct ixl_vsi *vsi = &pf->vsi;
2254 	u32		reg;
2255 	u16		vector = 1;
2256 
2257 	/* First set up the adminq - vector 0 */
2258 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2259 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2260 
2261 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2262 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2263 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2264 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2265 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2266 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2267 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2268 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2269 
2270 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2271 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2272 
2273 	wr32(hw, I40E_PFINT_DYN_CTL0,
2274 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2275 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2276 
2277 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2278 
2279 	/* Next configure the queues */
2280 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2281 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
2282 		/* First queue type is RX / type 0 */
2283 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2284 
2285 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2286 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2287 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2288 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2289 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2290 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2291 
2292 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2293 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2294 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2295 		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2296 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2297 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2298 	}
2299 }
2300 
2301 /*
2302  * Configure for MSI single vector operation
2303  */
2304 static void
2305 ixl_configure_legacy(struct ixl_pf *pf)
2306 {
2307 	struct i40e_hw	*hw = &pf->hw;
2308 	u32		reg;
2309 
2310 
2311 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2312 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2313 
2314 
2315 	/* Setup "other" causes */
2316 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2317 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2318 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2319 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2320 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2321 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2322 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2323 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2324 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2325 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2326 	    ;
2327 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2328 
2329 	/* SW_ITR_IDX = 0, but don't change INTENA */
2330 	wr32(hw, I40E_PFINT_DYN_CTL0,
2331 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2332 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2333 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2334 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2335 
2336 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2337 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2338 
2339 	/* Associate the queue pair to the vector and enable the q int */
2340 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2341 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2342 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2343 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2344 
2345 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2346 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2347 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2348 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2349 
2350 	/* Next enable the queue pair */
2351 	reg = rd32(hw, I40E_QTX_ENA(0));
2352 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2353 	wr32(hw, I40E_QTX_ENA(0), reg);
2354 
2355 	reg = rd32(hw, I40E_QRX_ENA(0));
2356 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2357 	wr32(hw, I40E_QRX_ENA(0), reg);
2358 }
2359 
2360 
2361 /*
2362  * Set the Initial ITR state
2363  */
2364 static void
2365 ixl_configure_itr(struct ixl_pf *pf)
2366 {
2367 	struct i40e_hw		*hw = &pf->hw;
2368 	struct ixl_vsi		*vsi = &pf->vsi;
2369 	struct ixl_queue	*que = vsi->queues;
2370 
2371 	vsi->rx_itr_setting = ixl_rx_itr;
2372 	if (ixl_dynamic_rx_itr)
2373 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2374 	vsi->tx_itr_setting = ixl_tx_itr;
2375 	if (ixl_dynamic_tx_itr)
2376 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2377 
2378 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2379 		struct tx_ring	*txr = &que->txr;
2380 		struct rx_ring 	*rxr = &que->rxr;
2381 
2382 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2383 		    vsi->rx_itr_setting);
2384 		rxr->itr = vsi->rx_itr_setting;
2385 		rxr->latency = IXL_AVE_LATENCY;
2386 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2387 		    vsi->tx_itr_setting);
2388 		txr->itr = vsi->tx_itr_setting;
2389 		txr->latency = IXL_AVE_LATENCY;
2390 	}
2391 }
2392 
2393 
2394 static int
2395 ixl_allocate_pci_resources(struct ixl_pf *pf)
2396 {
2397 	int             rid;
2398 	device_t        dev = pf->dev;
2399 
2400 	rid = PCIR_BAR(0);
2401 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2402 	    &rid, RF_ACTIVE);
2403 
2404 	if (!(pf->pci_mem)) {
2405 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2406 		return (ENXIO);
2407 	}
2408 
2409 	pf->osdep.mem_bus_space_tag =
2410 		rman_get_bustag(pf->pci_mem);
2411 	pf->osdep.mem_bus_space_handle =
2412 		rman_get_bushandle(pf->pci_mem);
2413 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2414 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2415 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2416 
2417 	pf->hw.back = &pf->osdep;
2418 
2419 	/*
2420 	** Now setup MSI or MSI/X, should
2421 	** return us the number of supported
2422 	** vectors. (Will be 1 for MSI)
2423 	*/
2424 	pf->msix = ixl_init_msix(pf);
2425 	return (0);
2426 }
2427 
2428 static void
2429 ixl_free_pci_resources(struct ixl_pf * pf)
2430 {
2431 	struct ixl_vsi		*vsi = &pf->vsi;
2432 	struct ixl_queue	*que = vsi->queues;
2433 	device_t		dev = pf->dev;
2434 	int			rid, memrid;
2435 
2436 	memrid = PCIR_BAR(IXL_BAR);
2437 
2438 	/* We may get here before stations are setup */
2439 	if ((!ixl_enable_msix) || (que == NULL))
2440 		goto early;
2441 
2442 	/*
2443 	**  Release all msix VSI resources:
2444 	*/
2445 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2446 		rid = que->msix + 1;
2447 		if (que->tag != NULL) {
2448 			bus_teardown_intr(dev, que->res, que->tag);
2449 			que->tag = NULL;
2450 		}
2451 		if (que->res != NULL)
2452 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2453 	}
2454 
2455 early:
2456 	/* Clean the AdminQ interrupt last */
2457 	if (pf->admvec) /* we are doing MSIX */
2458 		rid = pf->admvec + 1;
2459 	else
2460 		(pf->msix != 0) ? (rid = 1):(rid = 0);
2461 
2462 	if (pf->tag != NULL) {
2463 		bus_teardown_intr(dev, pf->res, pf->tag);
2464 		pf->tag = NULL;
2465 	}
2466 	if (pf->res != NULL)
2467 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2468 
2469 	if (pf->msix)
2470 		pci_release_msi(dev);
2471 
2472 	if (pf->msix_mem != NULL)
2473 		bus_release_resource(dev, SYS_RES_MEMORY,
2474 		    memrid, pf->msix_mem);
2475 
2476 	if (pf->pci_mem != NULL)
2477 		bus_release_resource(dev, SYS_RES_MEMORY,
2478 		    PCIR_BAR(0), pf->pci_mem);
2479 
2480 	return;
2481 }
2482 
2483 static void
2484 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2485 {
2486 	/* Display supported media types */
2487 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2488 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2489 
2490 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2491 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2492 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2493 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2494 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2495 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2496 
2497 	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2498 	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2499 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2500 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2501 
2502 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2503 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2504 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2505 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2506 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2507 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2508 
2509 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2510 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2511 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2512 	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2513 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2514 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2515 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2516 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2517 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2518 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2519 
2520 #ifndef IFM_ETH_XTYPE
2521 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2522 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2523 
2524 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2525 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2526 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2527 	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2528 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2529 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2530 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2531 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2532 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2533 
2534 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2535 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2536 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2537 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2538 #else
2539 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2540 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2541 
2542 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2543 	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2544 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2545 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2546 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2547 	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2548 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2549 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2550 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2551 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2552 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2553 
2554 	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2555 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2556 
2557 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2558 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2559 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2560 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2561 #endif
2562 }
2563 
2564 /*********************************************************************
2565  *
2566  *  Setup networking device structure and register an interface.
2567  *
2568  **********************************************************************/
2569 static int
2570 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2571 {
2572 	struct ifnet		*ifp;
2573 	struct i40e_hw		*hw = vsi->hw;
2574 	struct ixl_queue	*que = vsi->queues;
2575 	struct i40e_aq_get_phy_abilities_resp abilities;
2576 	enum i40e_status_code aq_error = 0;
2577 
2578 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2579 
2580 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2581 	if (ifp == NULL) {
2582 		device_printf(dev, "can not allocate ifnet structure\n");
2583 		return (-1);
2584 	}
2585 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2586 	ifp->if_mtu = ETHERMTU;
2587 	ifp->if_baudrate = IF_Gbps(40);
2588 	ifp->if_init = ixl_init;
2589 	ifp->if_softc = vsi;
2590 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2591 	ifp->if_ioctl = ixl_ioctl;
2592 
2593 #if __FreeBSD_version >= 1100036
2594 	if_setgetcounterfn(ifp, ixl_get_counter);
2595 #endif
2596 
2597 	ifp->if_transmit = ixl_mq_start;
2598 
2599 	ifp->if_qflush = ixl_qflush;
2600 
2601 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2602 
2603 	vsi->max_frame_size =
2604 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2605 	    + ETHER_VLAN_ENCAP_LEN;
2606 
2607 	/*
2608 	 * Tell the upper layer(s) we support long frames.
2609 	 */
2610 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2611 
2612 	ifp->if_capabilities |= IFCAP_HWCSUM;
2613 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2614 	ifp->if_capabilities |= IFCAP_TSO;
2615 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2616 	ifp->if_capabilities |= IFCAP_LRO;
2617 
2618 	/* VLAN capabilties */
2619 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2620 			     |  IFCAP_VLAN_HWTSO
2621 			     |  IFCAP_VLAN_MTU
2622 			     |  IFCAP_VLAN_HWCSUM;
2623 	ifp->if_capenable = ifp->if_capabilities;
2624 
2625 	/*
2626 	** Don't turn this on by default, if vlans are
2627 	** created on another pseudo device (eg. lagg)
2628 	** then vlan events are not passed thru, breaking
2629 	** operation, but with HW FILTER off it works. If
2630 	** using vlans directly on the ixl driver you can
2631 	** enable this and get full hardware tag filtering.
2632 	*/
2633 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2634 
2635 	/*
2636 	 * Specify the media types supported by this adapter and register
2637 	 * callbacks to update media and link information
2638 	 */
2639 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2640 		     ixl_media_status);
2641 
2642 	aq_error = i40e_aq_get_phy_capabilities(hw,
2643 	    FALSE, TRUE, &abilities, NULL);
2644 	/* May need delay to detect fiber correctly */
2645 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2646 		i40e_msec_delay(200);
2647 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2648 		    TRUE, &abilities, NULL);
2649 	}
2650 	if (aq_error) {
2651 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2652 			device_printf(dev, "Unknown PHY type detected!\n");
2653 		else
2654 			device_printf(dev,
2655 			    "Error getting supported media types, err %d,"
2656 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2657 		return (0);
2658 	}
2659 
2660 	ixl_add_ifmedia(vsi, abilities.phy_type);
2661 
2662 	/* Use autoselect media by default */
2663 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2664 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2665 
2666 	ether_ifattach(ifp, hw->mac.addr);
2667 
2668 	return (0);
2669 }
2670 
2671 /*
2672 ** Run when the Admin Queue gets a
2673 ** link transition interrupt.
2674 */
2675 static void
2676 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2677 {
2678 	struct i40e_hw	*hw = &pf->hw;
2679 	struct i40e_aqc_get_link_status *status =
2680 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2681 	bool check;
2682 
2683 	hw->phy.get_link_info = TRUE;
2684 	i40e_get_link_status(hw, &check);
2685 	pf->link_up = check;
2686 #ifdef IXL_DEBUG
2687 	printf("Link is %s\n", check ? "up":"down");
2688 #endif
2689 	/* Report if Unqualified modules are found */
2690 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2691 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2692 	    (!(status->link_info & I40E_AQ_LINK_UP)))
2693 		device_printf(pf->dev, "Link failed because "
2694 		    "an unqualified module was detected\n");
2695 
2696 	return;
2697 }
2698 
2699 /*********************************************************************
2700  *
2701  *  Get Firmware Switch configuration
2702  *	- this will need to be more robust when more complex
2703  *	  switch configurations are enabled.
2704  *
2705  **********************************************************************/
2706 static int
2707 ixl_switch_config(struct ixl_pf *pf)
2708 {
2709 	struct i40e_hw	*hw = &pf->hw;
2710 	struct ixl_vsi	*vsi = &pf->vsi;
2711 	device_t 	dev = vsi->dev;
2712 	struct i40e_aqc_get_switch_config_resp *sw_config;
2713 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2714 	int	ret;
2715 	u16	next = 0;
2716 
2717 	memset(&aq_buf, 0, sizeof(aq_buf));
2718 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2719 	ret = i40e_aq_get_switch_config(hw, sw_config,
2720 	    sizeof(aq_buf), &next, NULL);
2721 	if (ret) {
2722 		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2723 		    ret);
2724 		return (ret);
2725 	}
2726 #ifdef IXL_DEBUG
2727 	device_printf(dev,
2728 	    "Switch config: header reported: %d in structure, %d total\n",
2729     	    sw_config->header.num_reported, sw_config->header.num_total);
2730 	for (int i = 0; i < sw_config->header.num_reported; i++) {
2731 		device_printf(dev,
2732 		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2733 		    sw_config->element[i].element_type,
2734 		    sw_config->element[i].seid,
2735 		    sw_config->element[i].uplink_seid,
2736 		    sw_config->element[i].downlink_seid);
2737 	}
2738 #endif
2739 	/* Simplified due to a single VSI at the moment */
2740 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2741 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2742 	vsi->seid = sw_config->element[0].seid;
2743 	return (ret);
2744 }
2745 
2746 /*********************************************************************
2747  *
2748  *  Initialize the VSI:  this handles contexts, which means things
2749  *  			 like the number of descriptors, buffer size,
2750  *			 plus we init the rings thru this function.
2751  *
2752  **********************************************************************/
2753 static int
2754 ixl_initialize_vsi(struct ixl_vsi *vsi)
2755 {
2756 	struct ixl_pf		*pf = vsi->back;
2757 	struct ixl_queue	*que = vsi->queues;
2758 	device_t		dev = vsi->dev;
2759 	struct i40e_hw		*hw = vsi->hw;
2760 	struct i40e_vsi_context	ctxt;
2761 	int			err = 0;
2762 
2763 	memset(&ctxt, 0, sizeof(ctxt));
2764 	ctxt.seid = vsi->seid;
2765 	if (pf->veb_seid != 0)
2766 		ctxt.uplink_seid = pf->veb_seid;
2767 	ctxt.pf_num = hw->pf_id;
2768 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2769 	if (err) {
2770 		device_printf(dev,"get vsi params failed %x!!\n", err);
2771 		return (err);
2772 	}
2773 #ifdef IXL_DEBUG
2774 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2775 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2776 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2777 	    ctxt.uplink_seid, ctxt.vsi_number,
2778 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2779 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2780 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2781 #endif
2782 	/*
2783 	** Set the queue and traffic class bits
2784 	**  - when multiple traffic classes are supported
2785 	**    this will need to be more robust.
2786 	*/
2787 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2788 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2789 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
2790 	ctxt.info.queue_mapping[0] = 0;
2791 	/*
2792 	 * This VSI will only use traffic class 0; start traffic class 0's
2793 	 * queue allocation at queue 0, and assign it 64 (2^6) queues (though
2794 	 * the driver may not use all of them).
2795 	 */
2796 	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2797 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2798 	    ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2799 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
2800 
2801 	/* Set VLAN receive stripping mode */
2802 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2803 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2804 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2805 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2806 	else
2807 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2808 
2809 	/* Keep copy of VSI info in VSI for statistic counters */
2810 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2811 
2812 	/* Reset VSI statistics */
2813 	ixl_vsi_reset_stats(vsi);
2814 	vsi->hw_filters_add = 0;
2815 	vsi->hw_filters_del = 0;
2816 
2817 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2818 
2819 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2820 	if (err) {
2821 		device_printf(dev,"update vsi params failed %x!!\n",
2822 		   hw->aq.asq_last_status);
2823 		return (err);
2824 	}
2825 
2826 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2827 		struct tx_ring		*txr = &que->txr;
2828 		struct rx_ring 		*rxr = &que->rxr;
2829 		struct i40e_hmc_obj_txq tctx;
2830 		struct i40e_hmc_obj_rxq rctx;
2831 		u32			txctl;
2832 		u16			size;
2833 
2834 
2835 		/* Setup the HMC TX Context  */
2836 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2837 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2838 		tctx.new_context = 1;
2839 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2840 		tctx.qlen = que->num_desc;
2841 		tctx.fc_ena = 0;
2842 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2843 		/* Enable HEAD writeback */
2844 		tctx.head_wb_ena = 1;
2845 		tctx.head_wb_addr = txr->dma.pa +
2846 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2847 		tctx.rdylist_act = 0;
2848 		err = i40e_clear_lan_tx_queue_context(hw, i);
2849 		if (err) {
2850 			device_printf(dev, "Unable to clear TX context\n");
2851 			break;
2852 		}
2853 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2854 		if (err) {
2855 			device_printf(dev, "Unable to set TX context\n");
2856 			break;
2857 		}
2858 		/* Associate the ring with this PF */
2859 		txctl = I40E_QTX_CTL_PF_QUEUE;
2860 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2861 		    I40E_QTX_CTL_PF_INDX_MASK);
2862 		wr32(hw, I40E_QTX_CTL(i), txctl);
2863 		ixl_flush(hw);
2864 
2865 		/* Do ring (re)init */
2866 		ixl_init_tx_ring(que);
2867 
2868 		/* Next setup the HMC RX Context  */
2869 		if (vsi->max_frame_size <= MCLBYTES)
2870 			rxr->mbuf_sz = MCLBYTES;
2871 		else
2872 			rxr->mbuf_sz = MJUMPAGESIZE;
2873 
2874 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2875 
2876 		/* Set up an RX context for the HMC */
2877 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2878 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2879 		/* ignore header split for now */
2880 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2881 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2882 		    vsi->max_frame_size : max_rxmax;
2883 		rctx.dtype = 0;
2884 		rctx.dsize = 1;	/* do 32byte descriptors */
2885 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2886 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2887 		rctx.qlen = que->num_desc;
2888 		rctx.tphrdesc_ena = 1;
2889 		rctx.tphwdesc_ena = 1;
2890 		rctx.tphdata_ena = 0;
2891 		rctx.tphhead_ena = 0;
2892 		rctx.lrxqthresh = 2;
2893 		rctx.crcstrip = 1;
2894 		rctx.l2tsel = 1;
2895 		rctx.showiv = 1;
2896 		rctx.fc_ena = 0;
2897 		rctx.prefena = 1;
2898 
2899 		err = i40e_clear_lan_rx_queue_context(hw, i);
2900 		if (err) {
2901 			device_printf(dev,
2902 			    "Unable to clear RX context %d\n", i);
2903 			break;
2904 		}
2905 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2906 		if (err) {
2907 			device_printf(dev, "Unable to set RX context %d\n", i);
2908 			break;
2909 		}
2910 		err = ixl_init_rx_ring(que);
2911 		if (err) {
2912 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2913 			break;
2914 		}
2915 #ifdef DEV_NETMAP
2916 		/* preserve queue */
2917 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2918 			struct netmap_adapter *na = NA(vsi->ifp);
2919 			struct netmap_kring *kring = &na->rx_rings[i];
2920 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2921 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2922 		} else
2923 #endif /* DEV_NETMAP */
2924 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2925 	}
2926 	return (err);
2927 }
2928 
2929 
2930 /*********************************************************************
2931  *
2932  *  Free all VSI structs.
2933  *
2934  **********************************************************************/
2935 void
2936 ixl_free_vsi(struct ixl_vsi *vsi)
2937 {
2938 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2939 	struct ixl_queue	*que = vsi->queues;
2940 
2941 	/* Free station queues */
2942 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2943 		struct tx_ring *txr = &que->txr;
2944 		struct rx_ring *rxr = &que->rxr;
2945 
2946 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2947 			continue;
2948 		IXL_TX_LOCK(txr);
2949 		ixl_free_que_tx(que);
2950 		if (txr->base)
2951 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2952 		IXL_TX_UNLOCK(txr);
2953 		IXL_TX_LOCK_DESTROY(txr);
2954 
2955 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2956 			continue;
2957 		IXL_RX_LOCK(rxr);
2958 		ixl_free_que_rx(que);
2959 		if (rxr->base)
2960 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2961 		IXL_RX_UNLOCK(rxr);
2962 		IXL_RX_LOCK_DESTROY(rxr);
2963 
2964 	}
2965 	free(vsi->queues, M_DEVBUF);
2966 
2967 	/* Free VSI filter list */
2968 	ixl_free_mac_filters(vsi);
2969 }
2970 
2971 static void
2972 ixl_free_mac_filters(struct ixl_vsi *vsi)
2973 {
2974 	struct ixl_mac_filter *f;
2975 
2976 	while (!SLIST_EMPTY(&vsi->ftl)) {
2977 		f = SLIST_FIRST(&vsi->ftl);
2978 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2979 		free(f, M_DEVBUF);
2980 	}
2981 }
2982 
2983 
2984 /*********************************************************************
2985  *
2986  *  Allocate memory for the VSI (virtual station interface) and their
2987  *  associated queues, rings and the descriptors associated with each,
2988  *  called only once at attach.
2989  *
2990  **********************************************************************/
2991 static int
2992 ixl_setup_stations(struct ixl_pf *pf)
2993 {
2994 	device_t		dev = pf->dev;
2995 	struct ixl_vsi		*vsi;
2996 	struct ixl_queue	*que;
2997 	struct tx_ring		*txr;
2998 	struct rx_ring		*rxr;
2999 	int 			rsize, tsize;
3000 	int			error = I40E_SUCCESS;
3001 
3002 	vsi = &pf->vsi;
3003 	vsi->back = (void *)pf;
3004 	vsi->hw = &pf->hw;
3005 	vsi->id = 0;
3006 	vsi->num_vlans = 0;
3007 	vsi->back = pf;
3008 
3009 	/* Get memory for the station queues */
3010         if (!(vsi->queues =
3011             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
3012             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3013                 device_printf(dev, "Unable to allocate queue memory\n");
3014                 error = ENOMEM;
3015                 goto early;
3016         }
3017 
3018 	for (int i = 0; i < vsi->num_queues; i++) {
3019 		que = &vsi->queues[i];
3020 		que->num_desc = ixl_ringsz;
3021 		que->me = i;
3022 		que->vsi = vsi;
3023 		/* mark the queue as active */
3024 		vsi->active_queues |= (u64)1 << que->me;
3025 		txr = &que->txr;
3026 		txr->que = que;
3027 		txr->tail = I40E_QTX_TAIL(que->me);
3028 
3029 		/* Initialize the TX lock */
3030 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3031 		    device_get_nameunit(dev), que->me);
3032 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
3033 		/* Create the TX descriptor ring */
3034 		tsize = roundup2((que->num_desc *
3035 		    sizeof(struct i40e_tx_desc)) +
3036 		    sizeof(u32), DBA_ALIGN);
3037 		if (i40e_allocate_dma_mem(&pf->hw,
3038 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3039 			device_printf(dev,
3040 			    "Unable to allocate TX Descriptor memory\n");
3041 			error = ENOMEM;
3042 			goto fail;
3043 		}
3044 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3045 		bzero((void *)txr->base, tsize);
3046        		/* Now allocate transmit soft structs for the ring */
3047        		if (ixl_allocate_tx_data(que)) {
3048 			device_printf(dev,
3049 			    "Critical Failure setting up TX structures\n");
3050 			error = ENOMEM;
3051 			goto fail;
3052        		}
3053 		/* Allocate a buf ring */
3054 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3055 		    M_WAITOK, &txr->mtx);
3056 		if (txr->br == NULL) {
3057 			device_printf(dev,
3058 			    "Critical Failure setting up TX buf ring\n");
3059 			error = ENOMEM;
3060 			goto fail;
3061        		}
3062 
3063 		/*
3064 		 * Next the RX queues...
3065 		 */
3066 		rsize = roundup2(que->num_desc *
3067 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3068 		rxr = &que->rxr;
3069 		rxr->que = que;
3070 		rxr->tail = I40E_QRX_TAIL(que->me);
3071 
3072 		/* Initialize the RX side lock */
3073 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3074 		    device_get_nameunit(dev), que->me);
3075 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3076 
3077 		if (i40e_allocate_dma_mem(&pf->hw,
3078 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3079 			device_printf(dev,
3080 			    "Unable to allocate RX Descriptor memory\n");
3081 			error = ENOMEM;
3082 			goto fail;
3083 		}
3084 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3085 		bzero((void *)rxr->base, rsize);
3086 
3087         	/* Allocate receive soft structs for the ring*/
3088 		if (ixl_allocate_rx_data(que)) {
3089 			device_printf(dev,
3090 			    "Critical Failure setting up receive structs\n");
3091 			error = ENOMEM;
3092 			goto fail;
3093 		}
3094 	}
3095 
3096 	return (0);
3097 
3098 fail:
3099 	for (int i = 0; i < vsi->num_queues; i++) {
3100 		que = &vsi->queues[i];
3101 		rxr = &que->rxr;
3102 		txr = &que->txr;
3103 		if (rxr->base)
3104 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3105 		if (txr->base)
3106 			i40e_free_dma_mem(&pf->hw, &txr->dma);
3107 	}
3108 
3109 early:
3110 	return (error);
3111 }
3112 
3113 /*
3114 ** Provide a update to the queue RX
3115 ** interrupt moderation value.
3116 */
3117 static void
3118 ixl_set_queue_rx_itr(struct ixl_queue *que)
3119 {
3120 	struct ixl_vsi	*vsi = que->vsi;
3121 	struct i40e_hw	*hw = vsi->hw;
3122 	struct rx_ring	*rxr = &que->rxr;
3123 	u16		rx_itr;
3124 	u16		rx_latency = 0;
3125 	int		rx_bytes;
3126 
3127 
3128 	/* Idle, do nothing */
3129 	if (rxr->bytes == 0)
3130 		return;
3131 
3132 	if (ixl_dynamic_rx_itr) {
3133 		rx_bytes = rxr->bytes/rxr->itr;
3134 		rx_itr = rxr->itr;
3135 
3136 		/* Adjust latency range */
3137 		switch (rxr->latency) {
3138 		case IXL_LOW_LATENCY:
3139 			if (rx_bytes > 10) {
3140 				rx_latency = IXL_AVE_LATENCY;
3141 				rx_itr = IXL_ITR_20K;
3142 			}
3143 			break;
3144 		case IXL_AVE_LATENCY:
3145 			if (rx_bytes > 20) {
3146 				rx_latency = IXL_BULK_LATENCY;
3147 				rx_itr = IXL_ITR_8K;
3148 			} else if (rx_bytes <= 10) {
3149 				rx_latency = IXL_LOW_LATENCY;
3150 				rx_itr = IXL_ITR_100K;
3151 			}
3152 			break;
3153 		case IXL_BULK_LATENCY:
3154 			if (rx_bytes <= 20) {
3155 				rx_latency = IXL_AVE_LATENCY;
3156 				rx_itr = IXL_ITR_20K;
3157 			}
3158 			break;
3159        		 }
3160 
3161 		rxr->latency = rx_latency;
3162 
3163 		if (rx_itr != rxr->itr) {
3164 			/* do an exponential smoothing */
3165 			rx_itr = (10 * rx_itr * rxr->itr) /
3166 			    ((9 * rx_itr) + rxr->itr);
3167 			rxr->itr = rx_itr & IXL_MAX_ITR;
3168 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3169 			    que->me), rxr->itr);
3170 		}
3171 	} else { /* We may have have toggled to non-dynamic */
3172 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3173 			vsi->rx_itr_setting = ixl_rx_itr;
3174 		/* Update the hardware if needed */
3175 		if (rxr->itr != vsi->rx_itr_setting) {
3176 			rxr->itr = vsi->rx_itr_setting;
3177 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3178 			    que->me), rxr->itr);
3179 		}
3180 	}
3181 	rxr->bytes = 0;
3182 	rxr->packets = 0;
3183 	return;
3184 }
3185 
3186 
3187 /*
3188 ** Provide a update to the queue TX
3189 ** interrupt moderation value.
3190 */
3191 static void
3192 ixl_set_queue_tx_itr(struct ixl_queue *que)
3193 {
3194 	struct ixl_vsi	*vsi = que->vsi;
3195 	struct i40e_hw	*hw = vsi->hw;
3196 	struct tx_ring	*txr = &que->txr;
3197 	u16		tx_itr;
3198 	u16		tx_latency = 0;
3199 	int		tx_bytes;
3200 
3201 
3202 	/* Idle, do nothing */
3203 	if (txr->bytes == 0)
3204 		return;
3205 
3206 	if (ixl_dynamic_tx_itr) {
3207 		tx_bytes = txr->bytes/txr->itr;
3208 		tx_itr = txr->itr;
3209 
3210 		switch (txr->latency) {
3211 		case IXL_LOW_LATENCY:
3212 			if (tx_bytes > 10) {
3213 				tx_latency = IXL_AVE_LATENCY;
3214 				tx_itr = IXL_ITR_20K;
3215 			}
3216 			break;
3217 		case IXL_AVE_LATENCY:
3218 			if (tx_bytes > 20) {
3219 				tx_latency = IXL_BULK_LATENCY;
3220 				tx_itr = IXL_ITR_8K;
3221 			} else if (tx_bytes <= 10) {
3222 				tx_latency = IXL_LOW_LATENCY;
3223 				tx_itr = IXL_ITR_100K;
3224 			}
3225 			break;
3226 		case IXL_BULK_LATENCY:
3227 			if (tx_bytes <= 20) {
3228 				tx_latency = IXL_AVE_LATENCY;
3229 				tx_itr = IXL_ITR_20K;
3230 			}
3231 			break;
3232 		}
3233 
3234 		txr->latency = tx_latency;
3235 
3236 		if (tx_itr != txr->itr) {
3237        	         /* do an exponential smoothing */
3238 			tx_itr = (10 * tx_itr * txr->itr) /
3239 			    ((9 * tx_itr) + txr->itr);
3240 			txr->itr = tx_itr & IXL_MAX_ITR;
3241 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3242 			    que->me), txr->itr);
3243 		}
3244 
3245 	} else { /* We may have have toggled to non-dynamic */
3246 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3247 			vsi->tx_itr_setting = ixl_tx_itr;
3248 		/* Update the hardware if needed */
3249 		if (txr->itr != vsi->tx_itr_setting) {
3250 			txr->itr = vsi->tx_itr_setting;
3251 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3252 			    que->me), txr->itr);
3253 		}
3254 	}
3255 	txr->bytes = 0;
3256 	txr->packets = 0;
3257 	return;
3258 }
3259 
3260 #define QUEUE_NAME_LEN 32
3261 
3262 static void
3263 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3264     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3265 {
3266 	struct sysctl_oid *tree;
3267 	struct sysctl_oid_list *child;
3268 	struct sysctl_oid_list *vsi_list;
3269 
3270 	tree = device_get_sysctl_tree(pf->dev);
3271 	child = SYSCTL_CHILDREN(tree);
3272 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3273 				   CTLFLAG_RD, NULL, "VSI Number");
3274 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3275 
3276 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3277 }
3278 
3279 static void
3280 ixl_add_hw_stats(struct ixl_pf *pf)
3281 {
3282 	device_t dev = pf->dev;
3283 	struct ixl_vsi *vsi = &pf->vsi;
3284 	struct ixl_queue *queues = vsi->queues;
3285 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3286 
3287 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3288 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3289 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3290 	struct sysctl_oid_list *vsi_list;
3291 
3292 	struct sysctl_oid *queue_node;
3293 	struct sysctl_oid_list *queue_list;
3294 
3295 	struct tx_ring *txr;
3296 	struct rx_ring *rxr;
3297 	char queue_namebuf[QUEUE_NAME_LEN];
3298 
3299 	/* Driver statistics */
3300 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3301 			CTLFLAG_RD, &pf->watchdog_events,
3302 			"Watchdog timeouts");
3303 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3304 			CTLFLAG_RD, &pf->admin_irq,
3305 			"Admin Queue IRQ Handled");
3306 
3307 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3308 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3309 
3310 	/* Queue statistics */
3311 	for (int q = 0; q < vsi->num_queues; q++) {
3312 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3313 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3314 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3315 		queue_list = SYSCTL_CHILDREN(queue_node);
3316 
3317 		txr = &(queues[q].txr);
3318 		rxr = &(queues[q].rxr);
3319 
3320 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3321 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3322 				"m_defrag() failed");
3323 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3324 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3325 				"Driver dropped packets");
3326 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3327 				CTLFLAG_RD, &(queues[q].irqs),
3328 				"irqs on this queue");
3329 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3330 				CTLFLAG_RD, &(queues[q].tso),
3331 				"TSO");
3332 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3333 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3334 				"Driver tx dma failure in xmit");
3335 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3336 				CTLFLAG_RD, &(txr->no_desc),
3337 				"Queue No Descriptor Available");
3338 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3339 				CTLFLAG_RD, &(txr->total_packets),
3340 				"Queue Packets Transmitted");
3341 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3342 				CTLFLAG_RD, &(txr->tx_bytes),
3343 				"Queue Bytes Transmitted");
3344 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3345 				CTLFLAG_RD, &(rxr->rx_packets),
3346 				"Queue Packets Received");
3347 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3348 				CTLFLAG_RD, &(rxr->rx_bytes),
3349 				"Queue Bytes Received");
3350 	}
3351 
3352 	/* MAC stats */
3353 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3354 }
3355 
3356 static void
3357 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3358 	struct sysctl_oid_list *child,
3359 	struct i40e_eth_stats *eth_stats)
3360 {
3361 	struct ixl_sysctl_info ctls[] =
3362 	{
3363 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3364 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3365 			"Unicast Packets Received"},
3366 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3367 			"Multicast Packets Received"},
3368 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3369 			"Broadcast Packets Received"},
3370 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3371 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3372 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3373 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3374 			"Multicast Packets Transmitted"},
3375 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3376 			"Broadcast Packets Transmitted"},
3377 		// end
3378 		{0,0,0}
3379 	};
3380 
3381 	struct ixl_sysctl_info *entry = ctls;
3382 	while (entry->stat != 0)
3383 	{
3384 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3385 				CTLFLAG_RD, entry->stat,
3386 				entry->description);
3387 		entry++;
3388 	}
3389 }
3390 
3391 static void
3392 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3393 	struct sysctl_oid_list *child,
3394 	struct i40e_hw_port_stats *stats)
3395 {
3396 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3397 				    CTLFLAG_RD, NULL, "Mac Statistics");
3398 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3399 
3400 	struct i40e_eth_stats *eth_stats = &stats->eth;
3401 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3402 
3403 	struct ixl_sysctl_info ctls[] =
3404 	{
3405 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3406 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3407 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3408 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3409 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3410 		/* Packet Reception Stats */
3411 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3412 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3413 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3414 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3415 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3416 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3417 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3418 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3419 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3420 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3421 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3422 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3423 		/* Packet Transmission Stats */
3424 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3425 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3426 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3427 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3428 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3429 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3430 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3431 		/* Flow control */
3432 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3433 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3434 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3435 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3436 		/* End */
3437 		{0,0,0}
3438 	};
3439 
3440 	struct ixl_sysctl_info *entry = ctls;
3441 	while (entry->stat != 0)
3442 	{
3443 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3444 				CTLFLAG_RD, entry->stat,
3445 				entry->description);
3446 		entry++;
3447 	}
3448 }
3449 
3450 
3451 /*
3452 ** ixl_config_rss - setup RSS
3453 **  - note this is done for the single vsi
3454 */
3455 static void ixl_config_rss(struct ixl_vsi *vsi)
3456 {
3457 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3458 	struct i40e_hw	*hw = vsi->hw;
3459 	u32		lut = 0;
3460 	u64		set_hena = 0, hena;
3461 	int		i, j, que_id;
3462 #ifdef RSS
3463 	u32		rss_hash_config;
3464 	u32		rss_seed[IXL_KEYSZ];
3465 #else
3466 	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3467 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3468 			    0x35897377, 0x328b25e1, 0x4fa98922,
3469 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3470 #endif
3471 
3472 #ifdef RSS
3473         /* Fetch the configured RSS key */
3474         rss_getkey((uint8_t *) &rss_seed);
3475 #endif
3476 
3477 	/* Fill out hash function seed */
3478 	for (i = 0; i < IXL_KEYSZ; i++)
3479                 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3480 
3481 	/* Enable PCTYPES for RSS: */
3482 #ifdef RSS
3483 	rss_hash_config = rss_gethashconfig();
3484 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3485                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3486 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3487                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3488 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3489                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3490 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3491                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3492 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3493 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3494 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3495                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3496         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3497                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3498 #else
3499 	set_hena =
3500 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3501 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3502 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3503 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3504 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3505 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3506 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3507 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3508 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3509 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3510 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3511 #endif
3512 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3513 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3514 	hena |= set_hena;
3515 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3516 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3517 
3518 	/* Populate the LUT with max no. of queues in round robin fashion */
3519 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3520 		if (j == vsi->num_queues)
3521 			j = 0;
3522 #ifdef RSS
3523 		/*
3524 		 * Fetch the RSS bucket id for the given indirection entry.
3525 		 * Cap it at the number of configured buckets (which is
3526 		 * num_queues.)
3527 		 */
3528 		que_id = rss_get_indirection_to_bucket(i);
3529 		que_id = que_id % vsi->num_queues;
3530 #else
3531 		que_id = j;
3532 #endif
3533 		/* lut = 4-byte sliding window of 4 lut entries */
3534 		lut = (lut << 8) | (que_id &
3535 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3536 		/* On i = 3, we have 4 entries in lut; write to the register */
3537 		if ((i & 3) == 3)
3538 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3539 	}
3540 	ixl_flush(hw);
3541 }
3542 
3543 
3544 /*
3545 ** This routine is run via an vlan config EVENT,
3546 ** it enables us to use the HW Filter table since
3547 ** we can get the vlan id. This just creates the
3548 ** entry in the soft version of the VFTA, init will
3549 ** repopulate the real table.
3550 */
3551 static void
3552 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3553 {
3554 	struct ixl_vsi	*vsi = ifp->if_softc;
3555 	struct i40e_hw	*hw = vsi->hw;
3556 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3557 
3558 	if (ifp->if_softc !=  arg)   /* Not our event */
3559 		return;
3560 
3561 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3562 		return;
3563 
3564 	IXL_PF_LOCK(pf);
3565 	++vsi->num_vlans;
3566 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3567 	IXL_PF_UNLOCK(pf);
3568 }
3569 
3570 /*
3571 ** This routine is run via an vlan
3572 ** unconfig EVENT, remove our entry
3573 ** in the soft vfta.
3574 */
3575 static void
3576 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3577 {
3578 	struct ixl_vsi	*vsi = ifp->if_softc;
3579 	struct i40e_hw	*hw = vsi->hw;
3580 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3581 
3582 	if (ifp->if_softc !=  arg)
3583 		return;
3584 
3585 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3586 		return;
3587 
3588 	IXL_PF_LOCK(pf);
3589 	--vsi->num_vlans;
3590 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3591 	IXL_PF_UNLOCK(pf);
3592 }
3593 
3594 /*
3595 ** This routine updates vlan filters, called by init
3596 ** it scans the filter table and then updates the hw
3597 ** after a soft reset.
3598 */
3599 static void
3600 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3601 {
3602 	struct ixl_mac_filter	*f;
3603 	int			cnt = 0, flags;
3604 
3605 	if (vsi->num_vlans == 0)
3606 		return;
3607 	/*
3608 	** Scan the filter list for vlan entries,
3609 	** mark them for addition and then call
3610 	** for the AQ update.
3611 	*/
3612 	SLIST_FOREACH(f, &vsi->ftl, next) {
3613 		if (f->flags & IXL_FILTER_VLAN) {
3614 			f->flags |=
3615 			    (IXL_FILTER_ADD |
3616 			    IXL_FILTER_USED);
3617 			cnt++;
3618 		}
3619 	}
3620 	if (cnt == 0) {
3621 		printf("setup vlan: no filters found!\n");
3622 		return;
3623 	}
3624 	flags = IXL_FILTER_VLAN;
3625 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3626 	ixl_add_hw_filters(vsi, flags, cnt);
3627 	return;
3628 }
3629 
3630 /*
3631 ** Initialize filter list and add filters that the hardware
3632 ** needs to know about.
3633 */
3634 static void
3635 ixl_init_filters(struct ixl_vsi *vsi)
3636 {
3637 	/* Add broadcast address */
3638 	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3639 }
3640 
3641 /*
3642 ** This routine adds mulicast filters
3643 */
3644 static void
3645 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3646 {
3647 	struct ixl_mac_filter *f;
3648 
3649 	/* Does one already exist */
3650 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3651 	if (f != NULL)
3652 		return;
3653 
3654 	f = ixl_get_filter(vsi);
3655 	if (f == NULL) {
3656 		printf("WARNING: no filter available!!\n");
3657 		return;
3658 	}
3659 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3660 	f->vlan = IXL_VLAN_ANY;
3661 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3662 	    | IXL_FILTER_MC);
3663 
3664 	return;
3665 }
3666 
3667 static void
3668 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3669 {
3670 
3671 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3672 }
3673 
3674 /*
3675 ** This routine adds macvlan filters
3676 */
3677 static void
3678 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3679 {
3680 	struct ixl_mac_filter	*f, *tmp;
3681 	struct ixl_pf		*pf;
3682 	device_t		dev;
3683 
3684 	DEBUGOUT("ixl_add_filter: begin");
3685 
3686 	pf = vsi->back;
3687 	dev = pf->dev;
3688 
3689 	/* Does one already exist */
3690 	f = ixl_find_filter(vsi, macaddr, vlan);
3691 	if (f != NULL)
3692 		return;
3693 	/*
3694 	** Is this the first vlan being registered, if so we
3695 	** need to remove the ANY filter that indicates we are
3696 	** not in a vlan, and replace that with a 0 filter.
3697 	*/
3698 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3699 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3700 		if (tmp != NULL) {
3701 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3702 			ixl_add_filter(vsi, macaddr, 0);
3703 		}
3704 	}
3705 
3706 	f = ixl_get_filter(vsi);
3707 	if (f == NULL) {
3708 		device_printf(dev, "WARNING: no filter available!!\n");
3709 		return;
3710 	}
3711 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3712 	f->vlan = vlan;
3713 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3714 	if (f->vlan != IXL_VLAN_ANY)
3715 		f->flags |= IXL_FILTER_VLAN;
3716 	else
3717 		vsi->num_macs++;
3718 
3719 	ixl_add_hw_filters(vsi, f->flags, 1);
3720 	return;
3721 }
3722 
3723 static void
3724 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3725 {
3726 	struct ixl_mac_filter *f;
3727 
3728 	f = ixl_find_filter(vsi, macaddr, vlan);
3729 	if (f == NULL)
3730 		return;
3731 
3732 	f->flags |= IXL_FILTER_DEL;
3733 	ixl_del_hw_filters(vsi, 1);
3734 	vsi->num_macs--;
3735 
3736 	/* Check if this is the last vlan removal */
3737 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3738 		/* Switch back to a non-vlan filter */
3739 		ixl_del_filter(vsi, macaddr, 0);
3740 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3741 	}
3742 	return;
3743 }
3744 
3745 /*
3746 ** Find the filter with both matching mac addr and vlan id
3747 */
3748 static struct ixl_mac_filter *
3749 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3750 {
3751 	struct ixl_mac_filter	*f;
3752 	bool			match = FALSE;
3753 
3754 	SLIST_FOREACH(f, &vsi->ftl, next) {
3755 		if (!cmp_etheraddr(f->macaddr, macaddr))
3756 			continue;
3757 		if (f->vlan == vlan) {
3758 			match = TRUE;
3759 			break;
3760 		}
3761 	}
3762 
3763 	if (!match)
3764 		f = NULL;
3765 	return (f);
3766 }
3767 
3768 /*
3769 ** This routine takes additions to the vsi filter
3770 ** table and creates an Admin Queue call to create
3771 ** the filters in the hardware.
3772 */
3773 static void
3774 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3775 {
3776 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3777 	struct ixl_mac_filter	*f;
3778 	struct ixl_pf		*pf;
3779 	struct i40e_hw		*hw;
3780 	device_t		dev;
3781 	int			err, j = 0;
3782 
3783 	pf = vsi->back;
3784 	dev = pf->dev;
3785 	hw = &pf->hw;
3786 	IXL_PF_LOCK_ASSERT(pf);
3787 
3788 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3789 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3790 	if (a == NULL) {
3791 		device_printf(dev, "add_hw_filters failed to get memory\n");
3792 		return;
3793 	}
3794 
3795 	/*
3796 	** Scan the filter list, each time we find one
3797 	** we add it to the admin queue array and turn off
3798 	** the add bit.
3799 	*/
3800 	SLIST_FOREACH(f, &vsi->ftl, next) {
3801 		if (f->flags == flags) {
3802 			b = &a[j]; // a pox on fvl long names :)
3803 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3804 			if (f->vlan == IXL_VLAN_ANY) {
3805 				b->vlan_tag = 0;
3806 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3807 			} else {
3808 				b->vlan_tag = f->vlan;
3809 				b->flags = 0;
3810 			}
3811 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3812 			f->flags &= ~IXL_FILTER_ADD;
3813 			j++;
3814 		}
3815 		if (j == cnt)
3816 			break;
3817 	}
3818 	if (j > 0) {
3819 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3820 		if (err)
3821 			device_printf(dev, "aq_add_macvlan err %d, "
3822 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3823 		else
3824 			vsi->hw_filters_add += j;
3825 	}
3826 	free(a, M_DEVBUF);
3827 	return;
3828 }
3829 
3830 /*
3831 ** This routine takes removals in the vsi filter
3832 ** table and creates an Admin Queue call to delete
3833 ** the filters in the hardware.
3834 */
3835 static void
3836 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3837 {
3838 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3839 	struct ixl_pf		*pf;
3840 	struct i40e_hw		*hw;
3841 	device_t		dev;
3842 	struct ixl_mac_filter	*f, *f_temp;
3843 	int			err, j = 0;
3844 
3845 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3846 
3847 	pf = vsi->back;
3848 	hw = &pf->hw;
3849 	dev = pf->dev;
3850 
3851 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3852 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3853 	if (d == NULL) {
3854 		printf("del hw filter failed to get memory\n");
3855 		return;
3856 	}
3857 
3858 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3859 		if (f->flags & IXL_FILTER_DEL) {
3860 			e = &d[j]; // a pox on fvl long names :)
3861 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3862 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3863 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3864 			/* delete entry from vsi list */
3865 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3866 			free(f, M_DEVBUF);
3867 			j++;
3868 		}
3869 		if (j == cnt)
3870 			break;
3871 	}
3872 	if (j > 0) {
3873 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3874 		/* NOTE: returns ENOENT every time but seems to work fine,
3875 		   so we'll ignore that specific error. */
3876 		// TODO: Does this still occur on current firmwares?
3877 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3878 			int sc = 0;
3879 			for (int i = 0; i < j; i++)
3880 				sc += (!d[i].error_code);
3881 			vsi->hw_filters_del += sc;
3882 			device_printf(dev,
3883 			    "Failed to remove %d/%d filters, aq error %d\n",
3884 			    j - sc, j, hw->aq.asq_last_status);
3885 		} else
3886 			vsi->hw_filters_del += j;
3887 	}
3888 	free(d, M_DEVBUF);
3889 
3890 	DEBUGOUT("ixl_del_hw_filters: end\n");
3891 	return;
3892 }
3893 
3894 static int
3895 ixl_enable_rings(struct ixl_vsi *vsi)
3896 {
3897 	struct ixl_pf	*pf = vsi->back;
3898 	struct i40e_hw	*hw = &pf->hw;
3899 	int		index, error;
3900 	u32		reg;
3901 
3902 	error = 0;
3903 	for (int i = 0; i < vsi->num_queues; i++) {
3904 		index = vsi->first_queue + i;
3905 		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3906 
3907 		reg = rd32(hw, I40E_QTX_ENA(index));
3908 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3909 		    I40E_QTX_ENA_QENA_STAT_MASK;
3910 		wr32(hw, I40E_QTX_ENA(index), reg);
3911 		/* Verify the enable took */
3912 		for (int j = 0; j < 10; j++) {
3913 			reg = rd32(hw, I40E_QTX_ENA(index));
3914 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3915 				break;
3916 			i40e_msec_delay(10);
3917 		}
3918 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3919 			device_printf(pf->dev, "TX queue %d disabled!\n",
3920 			    index);
3921 			error = ETIMEDOUT;
3922 		}
3923 
3924 		reg = rd32(hw, I40E_QRX_ENA(index));
3925 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3926 		    I40E_QRX_ENA_QENA_STAT_MASK;
3927 		wr32(hw, I40E_QRX_ENA(index), reg);
3928 		/* Verify the enable took */
3929 		for (int j = 0; j < 10; j++) {
3930 			reg = rd32(hw, I40E_QRX_ENA(index));
3931 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3932 				break;
3933 			i40e_msec_delay(10);
3934 		}
3935 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3936 			device_printf(pf->dev, "RX queue %d disabled!\n",
3937 			    index);
3938 			error = ETIMEDOUT;
3939 		}
3940 	}
3941 
3942 	return (error);
3943 }
3944 
3945 static int
3946 ixl_disable_rings(struct ixl_vsi *vsi)
3947 {
3948 	struct ixl_pf	*pf = vsi->back;
3949 	struct i40e_hw	*hw = &pf->hw;
3950 	int		index, error;
3951 	u32		reg;
3952 
3953 	error = 0;
3954 	for (int i = 0; i < vsi->num_queues; i++) {
3955 		index = vsi->first_queue + i;
3956 
3957 		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3958 		i40e_usec_delay(500);
3959 
3960 		reg = rd32(hw, I40E_QTX_ENA(index));
3961 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3962 		wr32(hw, I40E_QTX_ENA(index), reg);
3963 		/* Verify the disable took */
3964 		for (int j = 0; j < 10; j++) {
3965 			reg = rd32(hw, I40E_QTX_ENA(index));
3966 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3967 				break;
3968 			i40e_msec_delay(10);
3969 		}
3970 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3971 			device_printf(pf->dev, "TX queue %d still enabled!\n",
3972 			    index);
3973 			error = ETIMEDOUT;
3974 		}
3975 
3976 		reg = rd32(hw, I40E_QRX_ENA(index));
3977 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3978 		wr32(hw, I40E_QRX_ENA(index), reg);
3979 		/* Verify the disable took */
3980 		for (int j = 0; j < 10; j++) {
3981 			reg = rd32(hw, I40E_QRX_ENA(index));
3982 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3983 				break;
3984 			i40e_msec_delay(10);
3985 		}
3986 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3987 			device_printf(pf->dev, "RX queue %d still enabled!\n",
3988 			    index);
3989 			error = ETIMEDOUT;
3990 		}
3991 	}
3992 
3993 	return (error);
3994 }
3995 
3996 /**
3997  * ixl_handle_mdd_event
3998  *
3999  * Called from interrupt handler to identify possibly malicious vfs
4000  * (But also detects events from the PF, as well)
4001  **/
4002 static void ixl_handle_mdd_event(struct ixl_pf *pf)
4003 {
4004 	struct i40e_hw *hw = &pf->hw;
4005 	device_t dev = pf->dev;
4006 	bool mdd_detected = false;
4007 	bool pf_mdd_detected = false;
4008 	u32 reg;
4009 
4010 	/* find what triggered the MDD event */
4011 	reg = rd32(hw, I40E_GL_MDET_TX);
4012 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4013 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
4014 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
4015 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4016 				I40E_GL_MDET_TX_EVENT_SHIFT;
4017 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
4018 				I40E_GL_MDET_TX_QUEUE_SHIFT;
4019 		device_printf(dev,
4020 			 "Malicious Driver Detection event 0x%02x"
4021 			 " on TX queue %d pf number 0x%02x\n",
4022 			 event, queue, pf_num);
4023 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4024 		mdd_detected = true;
4025 	}
4026 	reg = rd32(hw, I40E_GL_MDET_RX);
4027 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4028 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
4029 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
4030 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4031 				I40E_GL_MDET_RX_EVENT_SHIFT;
4032 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
4033 				I40E_GL_MDET_RX_QUEUE_SHIFT;
4034 		device_printf(dev,
4035 			 "Malicious Driver Detection event 0x%02x"
4036 			 " on RX queue %d of function 0x%02x\n",
4037 			 event, queue, func);
4038 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4039 		mdd_detected = true;
4040 	}
4041 
4042 	if (mdd_detected) {
4043 		reg = rd32(hw, I40E_PF_MDET_TX);
4044 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4045 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4046 			device_printf(dev,
4047 				 "MDD TX event is for this function 0x%08x",
4048 				 reg);
4049 			pf_mdd_detected = true;
4050 		}
4051 		reg = rd32(hw, I40E_PF_MDET_RX);
4052 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4053 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4054 			device_printf(dev,
4055 				 "MDD RX event is for this function 0x%08x",
4056 				 reg);
4057 			pf_mdd_detected = true;
4058 		}
4059 	}
4060 
4061 	/* re-enable mdd interrupt cause */
4062 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4063 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4064 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4065 	ixl_flush(hw);
4066 }
4067 
4068 static void
4069 ixl_enable_intr(struct ixl_vsi *vsi)
4070 {
4071 	struct i40e_hw		*hw = vsi->hw;
4072 	struct ixl_queue	*que = vsi->queues;
4073 
4074 	if (ixl_enable_msix) {
4075 		ixl_enable_adminq(hw);
4076 		for (int i = 0; i < vsi->num_queues; i++, que++)
4077 			ixl_enable_queue(hw, que->me);
4078 	} else
4079 		ixl_enable_legacy(hw);
4080 }
4081 
4082 static void
4083 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4084 {
4085 	struct i40e_hw		*hw = vsi->hw;
4086 	struct ixl_queue	*que = vsi->queues;
4087 
4088 	for (int i = 0; i < vsi->num_queues; i++, que++)
4089 		ixl_disable_queue(hw, que->me);
4090 }
4091 
4092 static void
4093 ixl_disable_intr(struct ixl_vsi *vsi)
4094 {
4095 	struct i40e_hw		*hw = vsi->hw;
4096 
4097 	if (ixl_enable_msix)
4098 		ixl_disable_adminq(hw);
4099 	else
4100 		ixl_disable_legacy(hw);
4101 }
4102 
4103 static void
4104 ixl_enable_adminq(struct i40e_hw *hw)
4105 {
4106 	u32		reg;
4107 
4108 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4109 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4110 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4111 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4112 	ixl_flush(hw);
4113 	return;
4114 }
4115 
4116 static void
4117 ixl_disable_adminq(struct i40e_hw *hw)
4118 {
4119 	u32		reg;
4120 
4121 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4122 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4123 
4124 	return;
4125 }
4126 
4127 static void
4128 ixl_enable_queue(struct i40e_hw *hw, int id)
4129 {
4130 	u32		reg;
4131 
4132 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4133 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4134 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4135 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4136 }
4137 
4138 static void
4139 ixl_disable_queue(struct i40e_hw *hw, int id)
4140 {
4141 	u32		reg;
4142 
4143 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4144 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4145 
4146 	return;
4147 }
4148 
4149 static void
4150 ixl_enable_legacy(struct i40e_hw *hw)
4151 {
4152 	u32		reg;
4153 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4154 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4155 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4156 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4157 }
4158 
4159 static void
4160 ixl_disable_legacy(struct i40e_hw *hw)
4161 {
4162 	u32		reg;
4163 
4164 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4165 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4166 
4167 	return;
4168 }
4169 
4170 static void
4171 ixl_update_stats_counters(struct ixl_pf *pf)
4172 {
4173 	struct i40e_hw	*hw = &pf->hw;
4174 	struct ixl_vsi	*vsi = &pf->vsi;
4175 	struct ixl_vf	*vf;
4176 
4177 	struct i40e_hw_port_stats *nsd = &pf->stats;
4178 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4179 
4180 	/* Update hw stats */
4181 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4182 			   pf->stat_offsets_loaded,
4183 			   &osd->crc_errors, &nsd->crc_errors);
4184 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4185 			   pf->stat_offsets_loaded,
4186 			   &osd->illegal_bytes, &nsd->illegal_bytes);
4187 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4188 			   I40E_GLPRT_GORCL(hw->port),
4189 			   pf->stat_offsets_loaded,
4190 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4191 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4192 			   I40E_GLPRT_GOTCL(hw->port),
4193 			   pf->stat_offsets_loaded,
4194 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4195 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4196 			   pf->stat_offsets_loaded,
4197 			   &osd->eth.rx_discards,
4198 			   &nsd->eth.rx_discards);
4199 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4200 			   I40E_GLPRT_UPRCL(hw->port),
4201 			   pf->stat_offsets_loaded,
4202 			   &osd->eth.rx_unicast,
4203 			   &nsd->eth.rx_unicast);
4204 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4205 			   I40E_GLPRT_UPTCL(hw->port),
4206 			   pf->stat_offsets_loaded,
4207 			   &osd->eth.tx_unicast,
4208 			   &nsd->eth.tx_unicast);
4209 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4210 			   I40E_GLPRT_MPRCL(hw->port),
4211 			   pf->stat_offsets_loaded,
4212 			   &osd->eth.rx_multicast,
4213 			   &nsd->eth.rx_multicast);
4214 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4215 			   I40E_GLPRT_MPTCL(hw->port),
4216 			   pf->stat_offsets_loaded,
4217 			   &osd->eth.tx_multicast,
4218 			   &nsd->eth.tx_multicast);
4219 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4220 			   I40E_GLPRT_BPRCL(hw->port),
4221 			   pf->stat_offsets_loaded,
4222 			   &osd->eth.rx_broadcast,
4223 			   &nsd->eth.rx_broadcast);
4224 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4225 			   I40E_GLPRT_BPTCL(hw->port),
4226 			   pf->stat_offsets_loaded,
4227 			   &osd->eth.tx_broadcast,
4228 			   &nsd->eth.tx_broadcast);
4229 
4230 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4231 			   pf->stat_offsets_loaded,
4232 			   &osd->tx_dropped_link_down,
4233 			   &nsd->tx_dropped_link_down);
4234 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4235 			   pf->stat_offsets_loaded,
4236 			   &osd->mac_local_faults,
4237 			   &nsd->mac_local_faults);
4238 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4239 			   pf->stat_offsets_loaded,
4240 			   &osd->mac_remote_faults,
4241 			   &nsd->mac_remote_faults);
4242 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4243 			   pf->stat_offsets_loaded,
4244 			   &osd->rx_length_errors,
4245 			   &nsd->rx_length_errors);
4246 
4247 	/* Flow control (LFC) stats */
4248 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4249 			   pf->stat_offsets_loaded,
4250 			   &osd->link_xon_rx, &nsd->link_xon_rx);
4251 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4252 			   pf->stat_offsets_loaded,
4253 			   &osd->link_xon_tx, &nsd->link_xon_tx);
4254 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4255 			   pf->stat_offsets_loaded,
4256 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4257 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4258 			   pf->stat_offsets_loaded,
4259 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4260 
4261 	/* Packet size stats rx */
4262 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4263 			   I40E_GLPRT_PRC64L(hw->port),
4264 			   pf->stat_offsets_loaded,
4265 			   &osd->rx_size_64, &nsd->rx_size_64);
4266 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4267 			   I40E_GLPRT_PRC127L(hw->port),
4268 			   pf->stat_offsets_loaded,
4269 			   &osd->rx_size_127, &nsd->rx_size_127);
4270 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4271 			   I40E_GLPRT_PRC255L(hw->port),
4272 			   pf->stat_offsets_loaded,
4273 			   &osd->rx_size_255, &nsd->rx_size_255);
4274 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4275 			   I40E_GLPRT_PRC511L(hw->port),
4276 			   pf->stat_offsets_loaded,
4277 			   &osd->rx_size_511, &nsd->rx_size_511);
4278 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4279 			   I40E_GLPRT_PRC1023L(hw->port),
4280 			   pf->stat_offsets_loaded,
4281 			   &osd->rx_size_1023, &nsd->rx_size_1023);
4282 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4283 			   I40E_GLPRT_PRC1522L(hw->port),
4284 			   pf->stat_offsets_loaded,
4285 			   &osd->rx_size_1522, &nsd->rx_size_1522);
4286 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4287 			   I40E_GLPRT_PRC9522L(hw->port),
4288 			   pf->stat_offsets_loaded,
4289 			   &osd->rx_size_big, &nsd->rx_size_big);
4290 
4291 	/* Packet size stats tx */
4292 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4293 			   I40E_GLPRT_PTC64L(hw->port),
4294 			   pf->stat_offsets_loaded,
4295 			   &osd->tx_size_64, &nsd->tx_size_64);
4296 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4297 			   I40E_GLPRT_PTC127L(hw->port),
4298 			   pf->stat_offsets_loaded,
4299 			   &osd->tx_size_127, &nsd->tx_size_127);
4300 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4301 			   I40E_GLPRT_PTC255L(hw->port),
4302 			   pf->stat_offsets_loaded,
4303 			   &osd->tx_size_255, &nsd->tx_size_255);
4304 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4305 			   I40E_GLPRT_PTC511L(hw->port),
4306 			   pf->stat_offsets_loaded,
4307 			   &osd->tx_size_511, &nsd->tx_size_511);
4308 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4309 			   I40E_GLPRT_PTC1023L(hw->port),
4310 			   pf->stat_offsets_loaded,
4311 			   &osd->tx_size_1023, &nsd->tx_size_1023);
4312 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4313 			   I40E_GLPRT_PTC1522L(hw->port),
4314 			   pf->stat_offsets_loaded,
4315 			   &osd->tx_size_1522, &nsd->tx_size_1522);
4316 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4317 			   I40E_GLPRT_PTC9522L(hw->port),
4318 			   pf->stat_offsets_loaded,
4319 			   &osd->tx_size_big, &nsd->tx_size_big);
4320 
4321 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4322 			   pf->stat_offsets_loaded,
4323 			   &osd->rx_undersize, &nsd->rx_undersize);
4324 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4325 			   pf->stat_offsets_loaded,
4326 			   &osd->rx_fragments, &nsd->rx_fragments);
4327 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4328 			   pf->stat_offsets_loaded,
4329 			   &osd->rx_oversize, &nsd->rx_oversize);
4330 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4331 			   pf->stat_offsets_loaded,
4332 			   &osd->rx_jabber, &nsd->rx_jabber);
4333 	pf->stat_offsets_loaded = true;
4334 	/* End hw stats */
4335 
4336 	/* Update vsi stats */
4337 	ixl_update_vsi_stats(vsi);
4338 
4339 	for (int i = 0; i < pf->num_vfs; i++) {
4340 		vf = &pf->vfs[i];
4341 		if (vf->vf_flags & VF_FLAG_ENABLED)
4342 			ixl_update_eth_stats(&pf->vfs[i].vsi);
4343 	}
4344 }
4345 
4346 /*
4347 ** Tasklet handler for MSIX Adminq interrupts
4348 **  - do outside interrupt since it might sleep
4349 */
4350 static void
4351 ixl_do_adminq(void *context, int pending)
4352 {
4353 	struct ixl_pf			*pf = context;
4354 	struct i40e_hw			*hw = &pf->hw;
4355 	struct ixl_vsi			*vsi = &pf->vsi;
4356 	struct i40e_arq_event_info	event;
4357 	i40e_status			ret;
4358 	u32				reg, loop = 0;
4359 	u16				opcode, result;
4360 
4361 	event.buf_len = IXL_AQ_BUF_SZ;
4362 	event.msg_buf = malloc(event.buf_len,
4363 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4364 	if (!event.msg_buf) {
4365 		printf("Unable to allocate adminq memory\n");
4366 		return;
4367 	}
4368 
4369 	IXL_PF_LOCK(pf);
4370 	/* clean and process any events */
4371 	do {
4372 		ret = i40e_clean_arq_element(hw, &event, &result);
4373 		if (ret)
4374 			break;
4375 		opcode = LE16_TO_CPU(event.desc.opcode);
4376 		switch (opcode) {
4377 		case i40e_aqc_opc_get_link_status:
4378 			ixl_link_event(pf, &event);
4379 			ixl_update_link_status(pf);
4380 			break;
4381 		case i40e_aqc_opc_send_msg_to_pf:
4382 #ifdef PCI_IOV
4383 			ixl_handle_vf_msg(pf, &event);
4384 #endif
4385 			break;
4386 		case i40e_aqc_opc_event_lan_overflow:
4387 			break;
4388 		default:
4389 #ifdef IXL_DEBUG
4390 			printf("AdminQ unknown event %x\n", opcode);
4391 #endif
4392 			break;
4393 		}
4394 
4395 	} while (result && (loop++ < IXL_ADM_LIMIT));
4396 
4397 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4398 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4399 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4400 	free(event.msg_buf, M_DEVBUF);
4401 
4402 	/*
4403 	 * If there are still messages to process, reschedule ourselves.
4404 	 * Otherwise, re-enable our interrupt and go to sleep.
4405 	 */
4406 	if (result > 0)
4407 		taskqueue_enqueue(pf->tq, &pf->adminq);
4408 	else
4409 		ixl_enable_intr(vsi);
4410 
4411 	IXL_PF_UNLOCK(pf);
4412 }
4413 
4414 #ifdef IXL_DEBUG_SYSCTL
4415 static int
4416 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4417 {
4418 	struct ixl_pf	*pf;
4419 	int		error, input = 0;
4420 
4421 	error = sysctl_handle_int(oidp, &input, 0, req);
4422 
4423 	if (error || !req->newptr)
4424 		return (error);
4425 
4426 	if (input == 1) {
4427 		pf = (struct ixl_pf *)arg1;
4428 		ixl_print_debug_info(pf);
4429 	}
4430 
4431 	return (error);
4432 }
4433 
4434 static void
4435 ixl_print_debug_info(struct ixl_pf *pf)
4436 {
4437 	struct i40e_hw		*hw = &pf->hw;
4438 	struct ixl_vsi		*vsi = &pf->vsi;
4439 	struct ixl_queue	*que = vsi->queues;
4440 	struct rx_ring		*rxr = &que->rxr;
4441 	struct tx_ring		*txr = &que->txr;
4442 	u32			reg;
4443 
4444 
4445 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4446 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4447 	printf("RX next check = %x\n", rxr->next_check);
4448 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4449 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4450 	printf("TX desc avail = %x\n", txr->avail);
4451 
4452 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4453 	 printf("RX Bytes = %x\n", reg);
4454 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4455 	 printf("Port RX Bytes = %x\n", reg);
4456 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4457 	 printf("RX discard = %x\n", reg);
4458 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4459 	 printf("Port RX discard = %x\n", reg);
4460 
4461 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4462 	 printf("TX errors = %x\n", reg);
4463 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4464 	 printf("TX Bytes = %x\n", reg);
4465 
4466 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4467 	 printf("RX undersize = %x\n", reg);
4468 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4469 	 printf("RX fragments = %x\n", reg);
4470 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4471 	 printf("RX oversize = %x\n", reg);
4472 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4473 	 printf("RX length error = %x\n", reg);
4474 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4475 	 printf("mac remote fault = %x\n", reg);
4476 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4477 	 printf("mac local fault = %x\n", reg);
4478 }
4479 #endif
4480 
4481 /**
4482  * Update VSI-specific ethernet statistics counters.
4483  **/
4484 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4485 {
4486 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4487 	struct i40e_hw *hw = &pf->hw;
4488 	struct i40e_eth_stats *es;
4489 	struct i40e_eth_stats *oes;
4490 	struct i40e_hw_port_stats *nsd;
4491 	u16 stat_idx = vsi->info.stat_counter_idx;
4492 
4493 	es = &vsi->eth_stats;
4494 	oes = &vsi->eth_stats_offsets;
4495 	nsd = &pf->stats;
4496 
4497 	/* Gather up the stats that the hw collects */
4498 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4499 			   vsi->stat_offsets_loaded,
4500 			   &oes->tx_errors, &es->tx_errors);
4501 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4502 			   vsi->stat_offsets_loaded,
4503 			   &oes->rx_discards, &es->rx_discards);
4504 
4505 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4506 			   I40E_GLV_GORCL(stat_idx),
4507 			   vsi->stat_offsets_loaded,
4508 			   &oes->rx_bytes, &es->rx_bytes);
4509 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4510 			   I40E_GLV_UPRCL(stat_idx),
4511 			   vsi->stat_offsets_loaded,
4512 			   &oes->rx_unicast, &es->rx_unicast);
4513 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4514 			   I40E_GLV_MPRCL(stat_idx),
4515 			   vsi->stat_offsets_loaded,
4516 			   &oes->rx_multicast, &es->rx_multicast);
4517 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4518 			   I40E_GLV_BPRCL(stat_idx),
4519 			   vsi->stat_offsets_loaded,
4520 			   &oes->rx_broadcast, &es->rx_broadcast);
4521 
4522 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4523 			   I40E_GLV_GOTCL(stat_idx),
4524 			   vsi->stat_offsets_loaded,
4525 			   &oes->tx_bytes, &es->tx_bytes);
4526 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4527 			   I40E_GLV_UPTCL(stat_idx),
4528 			   vsi->stat_offsets_loaded,
4529 			   &oes->tx_unicast, &es->tx_unicast);
4530 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4531 			   I40E_GLV_MPTCL(stat_idx),
4532 			   vsi->stat_offsets_loaded,
4533 			   &oes->tx_multicast, &es->tx_multicast);
4534 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4535 			   I40E_GLV_BPTCL(stat_idx),
4536 			   vsi->stat_offsets_loaded,
4537 			   &oes->tx_broadcast, &es->tx_broadcast);
4538 	vsi->stat_offsets_loaded = true;
4539 }
4540 
4541 static void
4542 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4543 {
4544 	struct ixl_pf		*pf;
4545 	struct ifnet		*ifp;
4546 	struct i40e_eth_stats	*es;
4547 	u64			tx_discards;
4548 
4549 	struct i40e_hw_port_stats *nsd;
4550 
4551 	pf = vsi->back;
4552 	ifp = vsi->ifp;
4553 	es = &vsi->eth_stats;
4554 	nsd = &pf->stats;
4555 
4556 	ixl_update_eth_stats(vsi);
4557 
4558 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4559 	for (int i = 0; i < vsi->num_queues; i++)
4560 		tx_discards += vsi->queues[i].txr.br->br_drops;
4561 
4562 	/* Update ifnet stats */
4563 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4564 	                   es->rx_multicast +
4565 			   es->rx_broadcast);
4566 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4567 	                   es->tx_multicast +
4568 			   es->tx_broadcast);
4569 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4570 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4571 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4572 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4573 
4574 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4575 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4576 	    nsd->rx_jabber);
4577 	IXL_SET_OERRORS(vsi, es->tx_errors);
4578 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4579 	IXL_SET_OQDROPS(vsi, tx_discards);
4580 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4581 	IXL_SET_COLLISIONS(vsi, 0);
4582 }
4583 
4584 /**
4585  * Reset all of the stats for the given pf
4586  **/
4587 void ixl_pf_reset_stats(struct ixl_pf *pf)
4588 {
4589 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4590 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4591 	pf->stat_offsets_loaded = false;
4592 }
4593 
4594 /**
4595  * Resets all stats of the given vsi
4596  **/
4597 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4598 {
4599 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4600 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4601 	vsi->stat_offsets_loaded = false;
4602 }
4603 
4604 /**
4605  * Read and update a 48 bit stat from the hw
4606  *
4607  * Since the device stats are not reset at PFReset, they likely will not
4608  * be zeroed when the driver starts.  We'll save the first values read
4609  * and use them as offsets to be subtracted from the raw values in order
4610  * to report stats that count from zero.
4611  **/
4612 static void
4613 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4614 	bool offset_loaded, u64 *offset, u64 *stat)
4615 {
4616 	u64 new_data;
4617 
4618 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4619 	new_data = rd64(hw, loreg);
4620 #else
4621 	/*
4622 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4623 	 * 10 don't support 8 byte bus reads/writes.
4624 	 */
4625 	new_data = rd32(hw, loreg);
4626 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4627 #endif
4628 
4629 	if (!offset_loaded)
4630 		*offset = new_data;
4631 	if (new_data >= *offset)
4632 		*stat = new_data - *offset;
4633 	else
4634 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4635 	*stat &= 0xFFFFFFFFFFFFULL;
4636 }
4637 
4638 /**
4639  * Read and update a 32 bit stat from the hw
4640  **/
4641 static void
4642 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4643 	bool offset_loaded, u64 *offset, u64 *stat)
4644 {
4645 	u32 new_data;
4646 
4647 	new_data = rd32(hw, reg);
4648 	if (!offset_loaded)
4649 		*offset = new_data;
4650 	if (new_data >= *offset)
4651 		*stat = (u32)(new_data - *offset);
4652 	else
4653 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4654 }
4655 
4656 /*
4657 ** Set flow control using sysctl:
4658 ** 	0 - off
4659 **	1 - rx pause
4660 **	2 - tx pause
4661 **	3 - full
4662 */
4663 static int
4664 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4665 {
4666 	/*
4667 	 * TODO: ensure flow control is disabled if
4668 	 * priority flow control is enabled
4669 	 *
4670 	 * TODO: ensure tx CRC by hardware should be enabled
4671 	 * if tx flow control is enabled.
4672 	 */
4673 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4674 	struct i40e_hw *hw = &pf->hw;
4675 	device_t dev = pf->dev;
4676 	int error = 0;
4677 	enum i40e_status_code aq_error = 0;
4678 	u8 fc_aq_err = 0;
4679 
4680 	/* Get request */
4681 	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4682 	if ((error) || (req->newptr == NULL))
4683 		return (error);
4684 	if (pf->fc < 0 || pf->fc > 3) {
4685 		device_printf(dev,
4686 		    "Invalid fc mode; valid modes are 0 through 3\n");
4687 		return (EINVAL);
4688 	}
4689 
4690 	/*
4691 	** Changing flow control mode currently does not work on
4692 	** 40GBASE-CR4 PHYs
4693 	*/
4694 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4695 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4696 		device_printf(dev, "Changing flow control mode unsupported"
4697 		    " on 40GBase-CR4 media.\n");
4698 		return (ENODEV);
4699 	}
4700 
4701 	/* Set fc ability for port */
4702 	hw->fc.requested_mode = pf->fc;
4703 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4704 	if (aq_error) {
4705 		device_printf(dev,
4706 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4707 		    __func__, aq_error, fc_aq_err);
4708 		return (EAGAIN);
4709 	}
4710 
4711 	return (0);
4712 }
4713 
4714 static int
4715 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4716 {
4717 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4718 	struct i40e_hw *hw = &pf->hw;
4719 	int error = 0, index = 0;
4720 
4721 	char *speeds[] = {
4722 		"Unknown",
4723 		"100M",
4724 		"1G",
4725 		"10G",
4726 		"40G",
4727 		"20G"
4728 	};
4729 
4730 	ixl_update_link_status(pf);
4731 
4732 	switch (hw->phy.link_info.link_speed) {
4733 	case I40E_LINK_SPEED_100MB:
4734 		index = 1;
4735 		break;
4736 	case I40E_LINK_SPEED_1GB:
4737 		index = 2;
4738 		break;
4739 	case I40E_LINK_SPEED_10GB:
4740 		index = 3;
4741 		break;
4742 	case I40E_LINK_SPEED_40GB:
4743 		index = 4;
4744 		break;
4745 	case I40E_LINK_SPEED_20GB:
4746 		index = 5;
4747 		break;
4748 	case I40E_LINK_SPEED_UNKNOWN:
4749 	default:
4750 		index = 0;
4751 		break;
4752 	}
4753 
4754 	error = sysctl_handle_string(oidp, speeds[index],
4755 	    strlen(speeds[index]), req);
4756 	return (error);
4757 }
4758 
4759 static int
4760 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4761 {
4762 	struct i40e_hw *hw = &pf->hw;
4763 	device_t dev = pf->dev;
4764 	struct i40e_aq_get_phy_abilities_resp abilities;
4765 	struct i40e_aq_set_phy_config config;
4766 	enum i40e_status_code aq_error = 0;
4767 
4768 	/* Get current capability information */
4769 	aq_error = i40e_aq_get_phy_capabilities(hw,
4770 	    FALSE, FALSE, &abilities, NULL);
4771 	if (aq_error) {
4772 		device_printf(dev,
4773 		    "%s: Error getting phy capabilities %d,"
4774 		    " aq error: %d\n", __func__, aq_error,
4775 		    hw->aq.asq_last_status);
4776 		return (EAGAIN);
4777 	}
4778 
4779 	/* Prepare new config */
4780 	bzero(&config, sizeof(config));
4781 	config.phy_type = abilities.phy_type;
4782 	config.abilities = abilities.abilities
4783 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4784 	config.eee_capability = abilities.eee_capability;
4785 	config.eeer = abilities.eeer_val;
4786 	config.low_power_ctrl = abilities.d3_lpan;
4787 	/* Translate into aq cmd link_speed */
4788 	if (speeds & 0x8)
4789 		config.link_speed |= I40E_LINK_SPEED_20GB;
4790 	if (speeds & 0x4)
4791 		config.link_speed |= I40E_LINK_SPEED_10GB;
4792 	if (speeds & 0x2)
4793 		config.link_speed |= I40E_LINK_SPEED_1GB;
4794 	if (speeds & 0x1)
4795 		config.link_speed |= I40E_LINK_SPEED_100MB;
4796 
4797 	/* Do aq command & restart link */
4798 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4799 	if (aq_error) {
4800 		device_printf(dev,
4801 		    "%s: Error setting new phy config %d,"
4802 		    " aq error: %d\n", __func__, aq_error,
4803 		    hw->aq.asq_last_status);
4804 		return (EAGAIN);
4805 	}
4806 
4807 	/*
4808 	** This seems a bit heavy handed, but we
4809 	** need to get a reinit on some devices
4810 	*/
4811 	IXL_PF_LOCK(pf);
4812 	ixl_stop(pf);
4813 	ixl_init_locked(pf);
4814 	IXL_PF_UNLOCK(pf);
4815 
4816 	return (0);
4817 }
4818 
4819 /*
4820 ** Control link advertise speed:
4821 **	Flags:
4822 **	0x1 - advertise 100 Mb
4823 **	0x2 - advertise 1G
4824 **	0x4 - advertise 10G
4825 **	0x8 - advertise 20G
4826 **
4827 ** Does not work on 40G devices.
4828 */
4829 static int
4830 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4831 {
4832 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4833 	struct i40e_hw *hw = &pf->hw;
4834 	device_t dev = pf->dev;
4835 	int requested_ls = 0;
4836 	int error = 0;
4837 
4838 	/*
4839 	** FW doesn't support changing advertised speed
4840 	** for 40G devices; speed is always 40G.
4841 	*/
4842 	if (i40e_is_40G_device(hw->device_id))
4843 		return (ENODEV);
4844 
4845 	/* Read in new mode */
4846 	requested_ls = pf->advertised_speed;
4847 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4848 	if ((error) || (req->newptr == NULL))
4849 		return (error);
4850 	/* Check for sane value */
4851 	if (requested_ls < 0x1 || requested_ls > 0xE) {
4852 		device_printf(dev, "Invalid advertised speed; "
4853 		    "valid modes are 0x1 through 0xE\n");
4854 		return (EINVAL);
4855 	}
4856 	/* Then check for validity based on adapter type */
4857 	switch (hw->device_id) {
4858 	case I40E_DEV_ID_10G_BASE_T:
4859 		if (requested_ls & 0x8) {
4860 			device_printf(dev,
4861 			    "20Gbs speed not supported on this device.\n");
4862 			return (EINVAL);
4863 		}
4864 		break;
4865 	case I40E_DEV_ID_20G_KR2:
4866 		if (requested_ls & 0x1) {
4867 			device_printf(dev,
4868 			    "100Mbs speed not supported on this device.\n");
4869 			return (EINVAL);
4870 		}
4871 		break;
4872 	default:
4873 		if (requested_ls & ~0x6) {
4874 			device_printf(dev,
4875 			    "Only 1/10Gbs speeds are supported on this device.\n");
4876 			return (EINVAL);
4877 		}
4878 		break;
4879 	}
4880 
4881 	/* Exit if no change */
4882 	if (pf->advertised_speed == requested_ls)
4883 		return (0);
4884 
4885 	error = ixl_set_advertised_speeds(pf, requested_ls);
4886 	if (error)
4887 		return (error);
4888 
4889 	pf->advertised_speed = requested_ls;
4890 	ixl_update_link_status(pf);
4891 	return (0);
4892 }
4893 
4894 /*
4895 ** Get the width and transaction speed of
4896 ** the bus this adapter is plugged into.
4897 */
4898 static u16
4899 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4900 {
4901         u16                     link;
4902         u32                     offset;
4903 
4904 
4905         /* Get the PCI Express Capabilities offset */
4906         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4907 
4908         /* ...and read the Link Status Register */
4909         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4910 
4911         switch (link & I40E_PCI_LINK_WIDTH) {
4912         case I40E_PCI_LINK_WIDTH_1:
4913                 hw->bus.width = i40e_bus_width_pcie_x1;
4914                 break;
4915         case I40E_PCI_LINK_WIDTH_2:
4916                 hw->bus.width = i40e_bus_width_pcie_x2;
4917                 break;
4918         case I40E_PCI_LINK_WIDTH_4:
4919                 hw->bus.width = i40e_bus_width_pcie_x4;
4920                 break;
4921         case I40E_PCI_LINK_WIDTH_8:
4922                 hw->bus.width = i40e_bus_width_pcie_x8;
4923                 break;
4924         default:
4925                 hw->bus.width = i40e_bus_width_unknown;
4926                 break;
4927         }
4928 
4929         switch (link & I40E_PCI_LINK_SPEED) {
4930         case I40E_PCI_LINK_SPEED_2500:
4931                 hw->bus.speed = i40e_bus_speed_2500;
4932                 break;
4933         case I40E_PCI_LINK_SPEED_5000:
4934                 hw->bus.speed = i40e_bus_speed_5000;
4935                 break;
4936         case I40E_PCI_LINK_SPEED_8000:
4937                 hw->bus.speed = i40e_bus_speed_8000;
4938                 break;
4939         default:
4940                 hw->bus.speed = i40e_bus_speed_unknown;
4941                 break;
4942         }
4943 
4944 
4945         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4946             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4947             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4948             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4949             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4950             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4951             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4952             ("Unknown"));
4953 
4954         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4955             (hw->bus.speed < i40e_bus_speed_8000)) {
4956                 device_printf(dev, "PCI-Express bandwidth available"
4957                     " for this device\n     may be insufficient for"
4958                     " optimal performance.\n");
4959                 device_printf(dev, "For expected performance a x8 "
4960                     "PCIE Gen3 slot is required.\n");
4961         }
4962 
4963         return (link);
4964 }
4965 
4966 static int
4967 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4968 {
4969 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4970 	struct i40e_hw	*hw = &pf->hw;
4971 	char		buf[32];
4972 
4973 	snprintf(buf, sizeof(buf),
4974 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4975 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4976 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4977 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4978 	    IXL_NVM_VERSION_HI_SHIFT,
4979 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4980 	    IXL_NVM_VERSION_LO_SHIFT,
4981 	    hw->nvm.eetrack);
4982 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4983 }
4984 
4985 
4986 #ifdef IXL_DEBUG_SYSCTL
4987 static int
4988 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4989 {
4990 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4991 	struct i40e_hw *hw = &pf->hw;
4992 	struct i40e_link_status link_status;
4993 	char buf[512];
4994 
4995 	enum i40e_status_code aq_error = 0;
4996 
4997 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4998 	if (aq_error) {
4999 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
5000 		return (EPERM);
5001 	}
5002 
5003 	sprintf(buf, "\n"
5004 	    "PHY Type : %#04x\n"
5005 	    "Speed    : %#04x\n"
5006 	    "Link info: %#04x\n"
5007 	    "AN info  : %#04x\n"
5008 	    "Ext info : %#04x",
5009 	    link_status.phy_type, link_status.link_speed,
5010 	    link_status.link_info, link_status.an_info,
5011 	    link_status.ext_info);
5012 
5013 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5014 }
5015 
5016 static int
5017 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5018 {
5019 	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
5020 	struct i40e_hw		*hw = &pf->hw;
5021 	char			buf[512];
5022 	enum i40e_status_code	aq_error = 0;
5023 
5024 	struct i40e_aq_get_phy_abilities_resp abilities;
5025 
5026 	aq_error = i40e_aq_get_phy_capabilities(hw,
5027 	    TRUE, FALSE, &abilities, NULL);
5028 	if (aq_error) {
5029 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
5030 		return (EPERM);
5031 	}
5032 
5033 	sprintf(buf, "\n"
5034 	    "PHY Type : %#010x\n"
5035 	    "Speed    : %#04x\n"
5036 	    "Abilities: %#04x\n"
5037 	    "EEE cap  : %#06x\n"
5038 	    "EEER reg : %#010x\n"
5039 	    "D3 Lpan  : %#04x",
5040 	    abilities.phy_type, abilities.link_speed,
5041 	    abilities.abilities, abilities.eee_capability,
5042 	    abilities.eeer_val, abilities.d3_lpan);
5043 
5044 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5045 }
5046 
5047 static int
5048 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5049 {
5050 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5051 	struct ixl_vsi *vsi = &pf->vsi;
5052 	struct ixl_mac_filter *f;
5053 	char *buf, *buf_i;
5054 
5055 	int error = 0;
5056 	int ftl_len = 0;
5057 	int ftl_counter = 0;
5058 	int buf_len = 0;
5059 	int entry_len = 42;
5060 
5061 	SLIST_FOREACH(f, &vsi->ftl, next) {
5062 		ftl_len++;
5063 	}
5064 
5065 	if (ftl_len < 1) {
5066 		sysctl_handle_string(oidp, "(none)", 6, req);
5067 		return (0);
5068 	}
5069 
5070 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5071 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5072 
5073 	sprintf(buf_i++, "\n");
5074 	SLIST_FOREACH(f, &vsi->ftl, next) {
5075 		sprintf(buf_i,
5076 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5077 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5078 		buf_i += entry_len;
5079 		/* don't print '\n' for last entry */
5080 		if (++ftl_counter != ftl_len) {
5081 			sprintf(buf_i, "\n");
5082 			buf_i++;
5083 		}
5084 	}
5085 
5086 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5087 	if (error)
5088 		printf("sysctl error: %d\n", error);
5089 	free(buf, M_DEVBUF);
5090 	return error;
5091 }
5092 
5093 #define IXL_SW_RES_SIZE 0x14
5094 static int
5095 ixl_res_alloc_cmp(const void *a, const void *b)
5096 {
5097 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5098 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5099 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5100 
5101 	return ((int)one->resource_type - (int)two->resource_type);
5102 }
5103 
5104 static int
5105 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5106 {
5107 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5108 	struct i40e_hw *hw = &pf->hw;
5109 	device_t dev = pf->dev;
5110 	struct sbuf *buf;
5111 	int error = 0;
5112 
5113 	u8 num_entries;
5114 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5115 
5116 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5117 	if (!buf) {
5118 		device_printf(dev, "Could not allocate sbuf for output.\n");
5119 		return (ENOMEM);
5120 	}
5121 
5122 	bzero(resp, sizeof(resp));
5123 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5124 				resp,
5125 				IXL_SW_RES_SIZE,
5126 				NULL);
5127 	if (error) {
5128 		device_printf(dev,
5129 		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5130 		    __func__, error, hw->aq.asq_last_status);
5131 		sbuf_delete(buf);
5132 		return error;
5133 	}
5134 
5135 	/* Sort entries by type for display */
5136 	qsort(resp, num_entries,
5137 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5138 	    &ixl_res_alloc_cmp);
5139 
5140 	sbuf_cat(buf, "\n");
5141 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5142 	sbuf_printf(buf,
5143 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5144 	    "     | (this)     | (all) | (this) | (all)       \n");
5145 	for (int i = 0; i < num_entries; i++) {
5146 		sbuf_printf(buf,
5147 		    "%#4x | %10d   %5d   %6d   %12d",
5148 		    resp[i].resource_type,
5149 		    resp[i].guaranteed,
5150 		    resp[i].total,
5151 		    resp[i].used,
5152 		    resp[i].total_unalloced);
5153 		if (i < num_entries - 1)
5154 			sbuf_cat(buf, "\n");
5155 	}
5156 
5157 	error = sbuf_finish(buf);
5158 	sbuf_delete(buf);
5159 
5160 	return (error);
5161 }
5162 
5163 /*
5164 ** Caller must init and delete sbuf; this function will clear and
5165 ** finish it for caller.
5166 */
5167 static char *
5168 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5169 {
5170 	sbuf_clear(s);
5171 
5172 	if (seid == 0 && uplink)
5173 		sbuf_cat(s, "Network");
5174 	else if (seid == 0)
5175 		sbuf_cat(s, "Host");
5176 	else if (seid == 1)
5177 		sbuf_cat(s, "EMP");
5178 	else if (seid <= 5)
5179 		sbuf_printf(s, "MAC %d", seid - 2);
5180 	else if (seid <= 15)
5181 		sbuf_cat(s, "Reserved");
5182 	else if (seid <= 31)
5183 		sbuf_printf(s, "PF %d", seid - 16);
5184 	else if (seid <= 159)
5185 		sbuf_printf(s, "VF %d", seid - 32);
5186 	else if (seid <= 287)
5187 		sbuf_cat(s, "Reserved");
5188 	else if (seid <= 511)
5189 		sbuf_cat(s, "Other"); // for other structures
5190 	else if (seid <= 895)
5191 		sbuf_printf(s, "VSI %d", seid - 512);
5192 	else if (seid <= 1023)
5193 		sbuf_printf(s, "Reserved");
5194 	else
5195 		sbuf_cat(s, "Invalid");
5196 
5197 	sbuf_finish(s);
5198 	return sbuf_data(s);
5199 }
5200 
5201 static int
5202 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5203 {
5204 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5205 	struct i40e_hw *hw = &pf->hw;
5206 	device_t dev = pf->dev;
5207 	struct sbuf *buf;
5208 	struct sbuf *nmbuf;
5209 	int error = 0;
5210 	u8 aq_buf[I40E_AQ_LARGE_BUF];
5211 
5212 	u16 next = 0;
5213 	struct i40e_aqc_get_switch_config_resp *sw_config;
5214 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5215 
5216 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5217 	if (!buf) {
5218 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5219 		return (ENOMEM);
5220 	}
5221 
5222 	error = i40e_aq_get_switch_config(hw, sw_config,
5223 	    sizeof(aq_buf), &next, NULL);
5224 	if (error) {
5225 		device_printf(dev,
5226 		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5227 		    __func__, error, hw->aq.asq_last_status);
5228 		sbuf_delete(buf);
5229 		return error;
5230 	}
5231 
5232 	nmbuf = sbuf_new_auto();
5233 	if (!nmbuf) {
5234 		device_printf(dev, "Could not allocate sbuf for name output.\n");
5235 		return (ENOMEM);
5236 	}
5237 
5238 	sbuf_cat(buf, "\n");
5239 	// Assuming <= 255 elements in switch
5240 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5241 	/* Exclude:
5242 	** Revision -- all elements are revision 1 for now
5243 	*/
5244 	sbuf_printf(buf,
5245 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5246 	    "                |          |          | (uplink)\n");
5247 	for (int i = 0; i < sw_config->header.num_reported; i++) {
5248 		// "%4d (%8s) | %8s   %8s   %#8x",
5249 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5250 		sbuf_cat(buf, " ");
5251 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5252 		    sw_config->element[i].seid, false));
5253 		sbuf_cat(buf, " | ");
5254 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5255 		    sw_config->element[i].uplink_seid, true));
5256 		sbuf_cat(buf, "   ");
5257 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5258 		    sw_config->element[i].downlink_seid, false));
5259 		sbuf_cat(buf, "   ");
5260 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5261 		if (i < sw_config->header.num_reported - 1)
5262 			sbuf_cat(buf, "\n");
5263 	}
5264 	sbuf_delete(nmbuf);
5265 
5266 	error = sbuf_finish(buf);
5267 	sbuf_delete(buf);
5268 
5269 	return (error);
5270 }
5271 #endif /* IXL_DEBUG_SYSCTL */
5272 
5273 
5274 #ifdef PCI_IOV
5275 static int
5276 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5277 {
5278 	struct i40e_hw *hw;
5279 	struct ixl_vsi *vsi;
5280 	struct i40e_vsi_context vsi_ctx;
5281 	int i;
5282 	uint16_t first_queue;
5283 	enum i40e_status_code code;
5284 
5285 	hw = &pf->hw;
5286 	vsi = &pf->vsi;
5287 
5288 	vsi_ctx.pf_num = hw->pf_id;
5289 	vsi_ctx.uplink_seid = pf->veb_seid;
5290 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5291 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5292 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5293 
5294 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5295 
5296 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5297 	vsi_ctx.info.switch_id = htole16(0);
5298 
5299 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5300 	vsi_ctx.info.sec_flags = 0;
5301 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5302 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5303 
5304 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5305 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5306 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5307 
5308 	vsi_ctx.info.valid_sections |=
5309 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5310 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5311 	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5312 	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5313 		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5314 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5315 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5316 
5317 	vsi_ctx.info.tc_mapping[0] = htole16(
5318 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5319 	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5320 
5321 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5322 	if (code != I40E_SUCCESS)
5323 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5324 	vf->vsi.seid = vsi_ctx.seid;
5325 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5326 	vf->vsi.first_queue = first_queue;
5327 	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5328 
5329 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5330 	if (code != I40E_SUCCESS)
5331 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5332 
5333 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5334 	if (code != I40E_SUCCESS) {
5335 		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5336 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5337 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5338 	}
5339 
5340 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5341 	return (0);
5342 }
5343 
5344 static int
5345 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5346 {
5347 	struct i40e_hw *hw;
5348 	int error;
5349 
5350 	hw = &pf->hw;
5351 
5352 	error = ixl_vf_alloc_vsi(pf, vf);
5353 	if (error != 0)
5354 		return (error);
5355 
5356 	vf->vsi.hw_filters_add = 0;
5357 	vf->vsi.hw_filters_del = 0;
5358 	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5359 	ixl_reconfigure_filters(&vf->vsi);
5360 
5361 	return (0);
5362 }
5363 
5364 static void
5365 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5366     uint32_t val)
5367 {
5368 	uint32_t qtable;
5369 	int index, shift;
5370 
5371 	/*
5372 	 * Two queues are mapped in a single register, so we have to do some
5373 	 * gymnastics to convert the queue number into a register index and
5374 	 * shift.
5375 	 */
5376 	index = qnum / 2;
5377 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5378 
5379 	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5380 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5381 	qtable |= val << shift;
5382 	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5383 }
5384 
5385 static void
5386 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5387 {
5388 	struct i40e_hw *hw;
5389 	uint32_t qtable;
5390 	int i;
5391 
5392 	hw = &pf->hw;
5393 
5394 	/*
5395 	 * Contiguous mappings aren't actually supported by the hardware,
5396 	 * so we have to use non-contiguous mappings.
5397 	 */
5398 	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5399 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5400 
5401 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5402 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5403 
5404 	for (i = 0; i < vf->vsi.num_queues; i++) {
5405 		qtable = (vf->vsi.first_queue + i) <<
5406 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5407 
5408 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5409 	}
5410 
5411 	/* Map queues allocated to VF to its VSI. */
5412 	for (i = 0; i < vf->vsi.num_queues; i++)
5413 		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5414 
5415 	/* Set rest of VSI queues as unused. */
5416 	for (; i < IXL_MAX_VSI_QUEUES; i++)
5417 		ixl_vf_map_vsi_queue(hw, vf, i,
5418 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5419 
5420 	ixl_flush(hw);
5421 }
5422 
5423 static void
5424 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5425 {
5426 	struct i40e_hw *hw;
5427 
5428 	hw = &pf->hw;
5429 
5430 	if (vsi->seid == 0)
5431 		return;
5432 
5433 	i40e_aq_delete_element(hw, vsi->seid, NULL);
5434 }
5435 
5436 static void
5437 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5438 {
5439 
5440 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5441 	ixl_flush(hw);
5442 }
5443 
5444 static void
5445 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5446 {
5447 
5448 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5449 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5450 	ixl_flush(hw);
5451 }
5452 
5453 static void
5454 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5455 {
5456 	struct i40e_hw *hw;
5457 	uint32_t vfint_reg, vpint_reg;
5458 	int i;
5459 
5460 	hw = &pf->hw;
5461 
5462 	ixl_vf_vsi_release(pf, &vf->vsi);
5463 
5464 	/* Index 0 has a special register. */
5465 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5466 
5467 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5468 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5469 		ixl_vf_disable_queue_intr(hw, vfint_reg);
5470 	}
5471 
5472 	/* Index 0 has a special register. */
5473 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5474 
5475 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5476 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5477 		ixl_vf_unregister_intr(hw, vpint_reg);
5478 	}
5479 
5480 	vf->vsi.num_queues = 0;
5481 }
5482 
5483 static int
5484 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5485 {
5486 	struct i40e_hw *hw;
5487 	int i;
5488 	uint16_t global_vf_num;
5489 	uint32_t ciad;
5490 
5491 	hw = &pf->hw;
5492 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5493 
5494 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5495 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5496 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5497 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5498 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5499 			return (0);
5500 		DELAY(1);
5501 	}
5502 
5503 	return (ETIMEDOUT);
5504 }
5505 
5506 static void
5507 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5508 {
5509 	struct i40e_hw *hw;
5510 	uint32_t vfrtrig;
5511 
5512 	hw = &pf->hw;
5513 
5514 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5515 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5516 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5517 	ixl_flush(hw);
5518 
5519 	ixl_reinit_vf(pf, vf);
5520 }
5521 
5522 static void
5523 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5524 {
5525 	struct i40e_hw *hw;
5526 	uint32_t vfrstat, vfrtrig;
5527 	int i, error;
5528 
5529 	hw = &pf->hw;
5530 
5531 	error = ixl_flush_pcie(pf, vf);
5532 	if (error != 0)
5533 		device_printf(pf->dev,
5534 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5535 		    vf->vf_num);
5536 
5537 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5538 		DELAY(10);
5539 
5540 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5541 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5542 			break;
5543 	}
5544 
5545 	if (i == IXL_VF_RESET_TIMEOUT)
5546 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5547 
5548 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5549 
5550 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5551 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5552 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5553 
5554 	if (vf->vsi.seid != 0)
5555 		ixl_disable_rings(&vf->vsi);
5556 
5557 	ixl_vf_release_resources(pf, vf);
5558 	ixl_vf_setup_vsi(pf, vf);
5559 	ixl_vf_map_queues(pf, vf);
5560 
5561 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5562 	ixl_flush(hw);
5563 }
5564 
5565 static const char *
5566 ixl_vc_opcode_str(uint16_t op)
5567 {
5568 
5569 	switch (op) {
5570 	case I40E_VIRTCHNL_OP_VERSION:
5571 		return ("VERSION");
5572 	case I40E_VIRTCHNL_OP_RESET_VF:
5573 		return ("RESET_VF");
5574 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5575 		return ("GET_VF_RESOURCES");
5576 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5577 		return ("CONFIG_TX_QUEUE");
5578 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5579 		return ("CONFIG_RX_QUEUE");
5580 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5581 		return ("CONFIG_VSI_QUEUES");
5582 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5583 		return ("CONFIG_IRQ_MAP");
5584 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5585 		return ("ENABLE_QUEUES");
5586 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5587 		return ("DISABLE_QUEUES");
5588 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5589 		return ("ADD_ETHER_ADDRESS");
5590 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5591 		return ("DEL_ETHER_ADDRESS");
5592 	case I40E_VIRTCHNL_OP_ADD_VLAN:
5593 		return ("ADD_VLAN");
5594 	case I40E_VIRTCHNL_OP_DEL_VLAN:
5595 		return ("DEL_VLAN");
5596 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5597 		return ("CONFIG_PROMISCUOUS_MODE");
5598 	case I40E_VIRTCHNL_OP_GET_STATS:
5599 		return ("GET_STATS");
5600 	case I40E_VIRTCHNL_OP_FCOE:
5601 		return ("FCOE");
5602 	case I40E_VIRTCHNL_OP_EVENT:
5603 		return ("EVENT");
5604 	default:
5605 		return ("UNKNOWN");
5606 	}
5607 }
5608 
5609 static int
5610 ixl_vc_opcode_level(uint16_t opcode)
5611 {
5612 
5613 	switch (opcode) {
5614 	case I40E_VIRTCHNL_OP_GET_STATS:
5615 		return (10);
5616 	default:
5617 		return (5);
5618 	}
5619 }
5620 
5621 static void
5622 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5623     enum i40e_status_code status, void *msg, uint16_t len)
5624 {
5625 	struct i40e_hw *hw;
5626 	int global_vf_id;
5627 
5628 	hw = &pf->hw;
5629 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5630 
5631 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5632 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5633 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5634 
5635 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5636 }
5637 
5638 static void
5639 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5640 {
5641 
5642 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5643 }
5644 
5645 static void
5646 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5647     enum i40e_status_code status, const char *file, int line)
5648 {
5649 
5650 	I40E_VC_DEBUG(pf, 1,
5651 	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5652 	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5653 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5654 }
5655 
5656 static void
5657 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5658     uint16_t msg_size)
5659 {
5660 	struct i40e_virtchnl_version_info reply;
5661 
5662 	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5663 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5664 		    I40E_ERR_PARAM);
5665 		return;
5666 	}
5667 
5668 	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5669 	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5670 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5671 	    sizeof(reply));
5672 }
5673 
5674 static void
5675 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5676     uint16_t msg_size)
5677 {
5678 
5679 	if (msg_size != 0) {
5680 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5681 		    I40E_ERR_PARAM);
5682 		return;
5683 	}
5684 
5685 	ixl_reset_vf(pf, vf);
5686 
5687 	/* No response to a reset message. */
5688 }
5689 
5690 static void
5691 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5692     uint16_t msg_size)
5693 {
5694 	struct i40e_virtchnl_vf_resource reply;
5695 
5696 	if (msg_size != 0) {
5697 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5698 		    I40E_ERR_PARAM);
5699 		return;
5700 	}
5701 
5702 	bzero(&reply, sizeof(reply));
5703 
5704 	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5705 
5706 	reply.num_vsis = 1;
5707 	reply.num_queue_pairs = vf->vsi.num_queues;
5708 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5709 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5710 	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5711 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5712 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5713 
5714 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5715 	    I40E_SUCCESS, &reply, sizeof(reply));
5716 }
5717 
5718 static int
5719 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5720     struct i40e_virtchnl_txq_info *info)
5721 {
5722 	struct i40e_hw *hw;
5723 	struct i40e_hmc_obj_txq txq;
5724 	uint16_t global_queue_num, global_vf_num;
5725 	enum i40e_status_code status;
5726 	uint32_t qtx_ctl;
5727 
5728 	hw = &pf->hw;
5729 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5730 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5731 	bzero(&txq, sizeof(txq));
5732 
5733 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5734 	if (status != I40E_SUCCESS)
5735 		return (EINVAL);
5736 
5737 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5738 
5739 	txq.head_wb_ena = info->headwb_enabled;
5740 	txq.head_wb_addr = info->dma_headwb_addr;
5741 	txq.qlen = info->ring_len;
5742 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5743 	txq.rdylist_act = 0;
5744 
5745 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5746 	if (status != I40E_SUCCESS)
5747 		return (EINVAL);
5748 
5749 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5750 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5751 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5752 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5753 	ixl_flush(hw);
5754 
5755 	return (0);
5756 }
5757 
5758 static int
5759 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5760     struct i40e_virtchnl_rxq_info *info)
5761 {
5762 	struct i40e_hw *hw;
5763 	struct i40e_hmc_obj_rxq rxq;
5764 	uint16_t global_queue_num;
5765 	enum i40e_status_code status;
5766 
5767 	hw = &pf->hw;
5768 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5769 	bzero(&rxq, sizeof(rxq));
5770 
5771 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5772 		return (EINVAL);
5773 
5774 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5775 	    info->max_pkt_size < ETHER_MIN_LEN)
5776 		return (EINVAL);
5777 
5778 	if (info->splithdr_enabled) {
5779 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5780 			return (EINVAL);
5781 
5782 		rxq.hsplit_0 = info->rx_split_pos &
5783 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5784 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5785 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5786 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5787 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5788 
5789 		rxq.dtype = 2;
5790 	}
5791 
5792 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5793 	if (status != I40E_SUCCESS)
5794 		return (EINVAL);
5795 
5796 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5797 	rxq.qlen = info->ring_len;
5798 
5799 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5800 
5801 	rxq.dsize = 1;
5802 	rxq.crcstrip = 1;
5803 	rxq.l2tsel = 1;
5804 
5805 	rxq.rxmax = info->max_pkt_size;
5806 	rxq.tphrdesc_ena = 1;
5807 	rxq.tphwdesc_ena = 1;
5808 	rxq.tphdata_ena = 1;
5809 	rxq.tphhead_ena = 1;
5810 	rxq.lrxqthresh = 2;
5811 	rxq.prefena = 1;
5812 
5813 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5814 	if (status != I40E_SUCCESS)
5815 		return (EINVAL);
5816 
5817 	return (0);
5818 }
5819 
5820 static void
5821 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5822     uint16_t msg_size)
5823 {
5824 	struct i40e_virtchnl_vsi_queue_config_info *info;
5825 	struct i40e_virtchnl_queue_pair_info *pair;
5826 	int i;
5827 
5828 	if (msg_size < sizeof(*info)) {
5829 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5830 		    I40E_ERR_PARAM);
5831 		return;
5832 	}
5833 
5834 	info = msg;
5835 	if (info->num_queue_pairs == 0) {
5836 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5837 		    I40E_ERR_PARAM);
5838 		return;
5839 	}
5840 
5841 	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5842 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5843 		    I40E_ERR_PARAM);
5844 		return;
5845 	}
5846 
5847 	if (info->vsi_id != vf->vsi.vsi_num) {
5848 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5849 		    I40E_ERR_PARAM);
5850 		return;
5851 	}
5852 
5853 	for (i = 0; i < info->num_queue_pairs; i++) {
5854 		pair = &info->qpair[i];
5855 
5856 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5857 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5858 		    pair->txq.queue_id != pair->rxq.queue_id ||
5859 		    pair->txq.queue_id >= vf->vsi.num_queues) {
5860 
5861 			i40e_send_vf_nack(pf, vf,
5862 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5863 			return;
5864 		}
5865 
5866 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5867 			i40e_send_vf_nack(pf, vf,
5868 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5869 			return;
5870 		}
5871 
5872 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5873 			i40e_send_vf_nack(pf, vf,
5874 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5875 			return;
5876 		}
5877 	}
5878 
5879 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5880 }
5881 
5882 static void
5883 ixl_vf_set_qctl(struct ixl_pf *pf,
5884     const struct i40e_virtchnl_vector_map *vector,
5885     enum i40e_queue_type cur_type, uint16_t cur_queue,
5886     enum i40e_queue_type *last_type, uint16_t *last_queue)
5887 {
5888 	uint32_t offset, qctl;
5889 	uint16_t itr_indx;
5890 
5891 	if (cur_type == I40E_QUEUE_TYPE_RX) {
5892 		offset = I40E_QINT_RQCTL(cur_queue);
5893 		itr_indx = vector->rxitr_idx;
5894 	} else {
5895 		offset = I40E_QINT_TQCTL(cur_queue);
5896 		itr_indx = vector->txitr_idx;
5897 	}
5898 
5899 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5900 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5901 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5902 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5903 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5904 
5905 	wr32(&pf->hw, offset, qctl);
5906 
5907 	*last_type = cur_type;
5908 	*last_queue = cur_queue;
5909 }
5910 
5911 static void
5912 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5913     const struct i40e_virtchnl_vector_map *vector)
5914 {
5915 	struct i40e_hw *hw;
5916 	u_int qindex;
5917 	enum i40e_queue_type type, last_type;
5918 	uint32_t lnklst_reg;
5919 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5920 
5921 	hw = &pf->hw;
5922 
5923 	rxq_map = vector->rxq_map;
5924 	txq_map = vector->txq_map;
5925 
5926 	last_queue = IXL_END_OF_INTR_LNKLST;
5927 	last_type = I40E_QUEUE_TYPE_RX;
5928 
5929 	/*
5930 	 * The datasheet says to optimize performance, RX queues and TX queues
5931 	 * should be interleaved in the interrupt linked list, so we process
5932 	 * both at once here.
5933 	 */
5934 	while ((rxq_map != 0) || (txq_map != 0)) {
5935 		if (txq_map != 0) {
5936 			qindex = ffs(txq_map) - 1;
5937 			type = I40E_QUEUE_TYPE_TX;
5938 			cur_queue = vf->vsi.first_queue + qindex;
5939 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5940 			    &last_type, &last_queue);
5941 			txq_map &= ~(1 << qindex);
5942 		}
5943 
5944 		if (rxq_map != 0) {
5945 			qindex = ffs(rxq_map) - 1;
5946 			type = I40E_QUEUE_TYPE_RX;
5947 			cur_queue = vf->vsi.first_queue + qindex;
5948 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5949 			    &last_type, &last_queue);
5950 			rxq_map &= ~(1 << qindex);
5951 		}
5952 	}
5953 
5954 	if (vector->vector_id == 0)
5955 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5956 	else
5957 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5958 		    vf->vf_num);
5959 	wr32(hw, lnklst_reg,
5960 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5961 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5962 
5963 	ixl_flush(hw);
5964 }
5965 
5966 static void
5967 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5968     uint16_t msg_size)
5969 {
5970 	struct i40e_virtchnl_irq_map_info *map;
5971 	struct i40e_virtchnl_vector_map *vector;
5972 	struct i40e_hw *hw;
5973 	int i, largest_txq, largest_rxq;
5974 
5975 	hw = &pf->hw;
5976 
5977 	if (msg_size < sizeof(*map)) {
5978 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5979 		    I40E_ERR_PARAM);
5980 		return;
5981 	}
5982 
5983 	map = msg;
5984 	if (map->num_vectors == 0) {
5985 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5986 		    I40E_ERR_PARAM);
5987 		return;
5988 	}
5989 
5990 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5991 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5992 		    I40E_ERR_PARAM);
5993 		return;
5994 	}
5995 
5996 	for (i = 0; i < map->num_vectors; i++) {
5997 		vector = &map->vecmap[i];
5998 
5999 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
6000 		    vector->vsi_id != vf->vsi.vsi_num) {
6001 			i40e_send_vf_nack(pf, vf,
6002 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
6003 			return;
6004 		}
6005 
6006 		if (vector->rxq_map != 0) {
6007 			largest_rxq = fls(vector->rxq_map) - 1;
6008 			if (largest_rxq >= vf->vsi.num_queues) {
6009 				i40e_send_vf_nack(pf, vf,
6010 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6011 				    I40E_ERR_PARAM);
6012 				return;
6013 			}
6014 		}
6015 
6016 		if (vector->txq_map != 0) {
6017 			largest_txq = fls(vector->txq_map) - 1;
6018 			if (largest_txq >= vf->vsi.num_queues) {
6019 				i40e_send_vf_nack(pf, vf,
6020 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6021 				    I40E_ERR_PARAM);
6022 				return;
6023 			}
6024 		}
6025 
6026 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6027 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
6028 			i40e_send_vf_nack(pf, vf,
6029 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6030 			    I40E_ERR_PARAM);
6031 			return;
6032 		}
6033 
6034 		ixl_vf_config_vector(pf, vf, vector);
6035 	}
6036 
6037 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6038 }
6039 
6040 static void
6041 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6042     uint16_t msg_size)
6043 {
6044 	struct i40e_virtchnl_queue_select *select;
6045 	int error;
6046 
6047 	if (msg_size != sizeof(*select)) {
6048 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6049 		    I40E_ERR_PARAM);
6050 		return;
6051 	}
6052 
6053 	select = msg;
6054 	if (select->vsi_id != vf->vsi.vsi_num ||
6055 	    select->rx_queues == 0 || select->tx_queues == 0) {
6056 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6057 		    I40E_ERR_PARAM);
6058 		return;
6059 	}
6060 
6061 	error = ixl_enable_rings(&vf->vsi);
6062 	if (error) {
6063 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6064 		    I40E_ERR_TIMEOUT);
6065 		return;
6066 	}
6067 
6068 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6069 }
6070 
6071 static void
6072 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6073     void *msg, uint16_t msg_size)
6074 {
6075 	struct i40e_virtchnl_queue_select *select;
6076 	int error;
6077 
6078 	if (msg_size != sizeof(*select)) {
6079 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6080 		    I40E_ERR_PARAM);
6081 		return;
6082 	}
6083 
6084 	select = msg;
6085 	if (select->vsi_id != vf->vsi.vsi_num ||
6086 	    select->rx_queues == 0 || select->tx_queues == 0) {
6087 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6088 		    I40E_ERR_PARAM);
6089 		return;
6090 	}
6091 
6092 	error = ixl_disable_rings(&vf->vsi);
6093 	if (error) {
6094 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6095 		    I40E_ERR_TIMEOUT);
6096 		return;
6097 	}
6098 
6099 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6100 }
6101 
6102 static boolean_t
6103 ixl_zero_mac(const uint8_t *addr)
6104 {
6105 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6106 
6107 	return (cmp_etheraddr(addr, zero));
6108 }
6109 
6110 static boolean_t
6111 ixl_bcast_mac(const uint8_t *addr)
6112 {
6113 
6114 	return (cmp_etheraddr(addr, ixl_bcast_addr));
6115 }
6116 
6117 static int
6118 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6119 {
6120 
6121 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6122 		return (EINVAL);
6123 
6124 	/*
6125 	 * If the VF is not allowed to change its MAC address, don't let it
6126 	 * set a MAC filter for an address that is not a multicast address and
6127 	 * is not its assigned MAC.
6128 	 */
6129 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6130 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6131 		return (EPERM);
6132 
6133 	return (0);
6134 }
6135 
6136 static void
6137 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6138     uint16_t msg_size)
6139 {
6140 	struct i40e_virtchnl_ether_addr_list *addr_list;
6141 	struct i40e_virtchnl_ether_addr *addr;
6142 	struct ixl_vsi *vsi;
6143 	int i;
6144 	size_t expected_size;
6145 
6146 	vsi = &vf->vsi;
6147 
6148 	if (msg_size < sizeof(*addr_list)) {
6149 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6150 		    I40E_ERR_PARAM);
6151 		return;
6152 	}
6153 
6154 	addr_list = msg;
6155 	expected_size = sizeof(*addr_list) +
6156 	    addr_list->num_elements * sizeof(*addr);
6157 
6158 	if (addr_list->num_elements == 0 ||
6159 	    addr_list->vsi_id != vsi->vsi_num ||
6160 	    msg_size != expected_size) {
6161 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6162 		    I40E_ERR_PARAM);
6163 		return;
6164 	}
6165 
6166 	for (i = 0; i < addr_list->num_elements; i++) {
6167 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6168 			i40e_send_vf_nack(pf, vf,
6169 			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6170 			return;
6171 		}
6172 	}
6173 
6174 	for (i = 0; i < addr_list->num_elements; i++) {
6175 		addr = &addr_list->list[i];
6176 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6177 	}
6178 
6179 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6180 }
6181 
6182 static void
6183 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6184     uint16_t msg_size)
6185 {
6186 	struct i40e_virtchnl_ether_addr_list *addr_list;
6187 	struct i40e_virtchnl_ether_addr *addr;
6188 	size_t expected_size;
6189 	int i;
6190 
6191 	if (msg_size < sizeof(*addr_list)) {
6192 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6193 		    I40E_ERR_PARAM);
6194 		return;
6195 	}
6196 
6197 	addr_list = msg;
6198 	expected_size = sizeof(*addr_list) +
6199 	    addr_list->num_elements * sizeof(*addr);
6200 
6201 	if (addr_list->num_elements == 0 ||
6202 	    addr_list->vsi_id != vf->vsi.vsi_num ||
6203 	    msg_size != expected_size) {
6204 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6205 		    I40E_ERR_PARAM);
6206 		return;
6207 	}
6208 
6209 	for (i = 0; i < addr_list->num_elements; i++) {
6210 		addr = &addr_list->list[i];
6211 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6212 			i40e_send_vf_nack(pf, vf,
6213 			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6214 			return;
6215 		}
6216 	}
6217 
6218 	for (i = 0; i < addr_list->num_elements; i++) {
6219 		addr = &addr_list->list[i];
6220 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6221 	}
6222 
6223 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6224 }
6225 
6226 static enum i40e_status_code
6227 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6228 {
6229 	struct i40e_vsi_context vsi_ctx;
6230 
6231 	vsi_ctx.seid = vf->vsi.seid;
6232 
6233 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6234 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6235 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6236 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6237 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6238 }
6239 
6240 static void
6241 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6242     uint16_t msg_size)
6243 {
6244 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6245 	enum i40e_status_code code;
6246 	size_t expected_size;
6247 	int i;
6248 
6249 	if (msg_size < sizeof(*filter_list)) {
6250 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6251 		    I40E_ERR_PARAM);
6252 		return;
6253 	}
6254 
6255 	filter_list = msg;
6256 	expected_size = sizeof(*filter_list) +
6257 	    filter_list->num_elements * sizeof(uint16_t);
6258 	if (filter_list->num_elements == 0 ||
6259 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6260 	    msg_size != expected_size) {
6261 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6262 		    I40E_ERR_PARAM);
6263 		return;
6264 	}
6265 
6266 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6267 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6268 		    I40E_ERR_PARAM);
6269 		return;
6270 	}
6271 
6272 	for (i = 0; i < filter_list->num_elements; i++) {
6273 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6274 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6275 			    I40E_ERR_PARAM);
6276 			return;
6277 		}
6278 	}
6279 
6280 	code = ixl_vf_enable_vlan_strip(pf, vf);
6281 	if (code != I40E_SUCCESS) {
6282 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6283 		    I40E_ERR_PARAM);
6284 	}
6285 
6286 	for (i = 0; i < filter_list->num_elements; i++)
6287 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6288 
6289 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6290 }
6291 
6292 static void
6293 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6294     uint16_t msg_size)
6295 {
6296 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6297 	int i;
6298 	size_t expected_size;
6299 
6300 	if (msg_size < sizeof(*filter_list)) {
6301 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6302 		    I40E_ERR_PARAM);
6303 		return;
6304 	}
6305 
6306 	filter_list = msg;
6307 	expected_size = sizeof(*filter_list) +
6308 	    filter_list->num_elements * sizeof(uint16_t);
6309 	if (filter_list->num_elements == 0 ||
6310 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6311 	    msg_size != expected_size) {
6312 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6313 		    I40E_ERR_PARAM);
6314 		return;
6315 	}
6316 
6317 	for (i = 0; i < filter_list->num_elements; i++) {
6318 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6319 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6320 			    I40E_ERR_PARAM);
6321 			return;
6322 		}
6323 	}
6324 
6325 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6326 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6327 		    I40E_ERR_PARAM);
6328 		return;
6329 	}
6330 
6331 	for (i = 0; i < filter_list->num_elements; i++)
6332 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6333 
6334 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6335 }
6336 
6337 static void
6338 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6339     void *msg, uint16_t msg_size)
6340 {
6341 	struct i40e_virtchnl_promisc_info *info;
6342 	enum i40e_status_code code;
6343 
6344 	if (msg_size != sizeof(*info)) {
6345 		i40e_send_vf_nack(pf, vf,
6346 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6347 		return;
6348 	}
6349 
6350 	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6351 		i40e_send_vf_nack(pf, vf,
6352 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6353 		return;
6354 	}
6355 
6356 	info = msg;
6357 	if (info->vsi_id != vf->vsi.vsi_num) {
6358 		i40e_send_vf_nack(pf, vf,
6359 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6360 		return;
6361 	}
6362 
6363 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6364 	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6365 	if (code != I40E_SUCCESS) {
6366 		i40e_send_vf_nack(pf, vf,
6367 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6368 		return;
6369 	}
6370 
6371 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6372 	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6373 	if (code != I40E_SUCCESS) {
6374 		i40e_send_vf_nack(pf, vf,
6375 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6376 		return;
6377 	}
6378 
6379 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6380 }
6381 
6382 static void
6383 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6384     uint16_t msg_size)
6385 {
6386 	struct i40e_virtchnl_queue_select *queue;
6387 
6388 	if (msg_size != sizeof(*queue)) {
6389 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6390 		    I40E_ERR_PARAM);
6391 		return;
6392 	}
6393 
6394 	queue = msg;
6395 	if (queue->vsi_id != vf->vsi.vsi_num) {
6396 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6397 		    I40E_ERR_PARAM);
6398 		return;
6399 	}
6400 
6401 	ixl_update_eth_stats(&vf->vsi);
6402 
6403 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6404 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6405 }
6406 
6407 static void
6408 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6409 {
6410 	struct ixl_vf *vf;
6411 	void *msg;
6412 	uint16_t vf_num, msg_size;
6413 	uint32_t opcode;
6414 
6415 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6416 	opcode = le32toh(event->desc.cookie_high);
6417 
6418 	if (vf_num >= pf->num_vfs) {
6419 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6420 		return;
6421 	}
6422 
6423 	vf = &pf->vfs[vf_num];
6424 	msg = event->msg_buf;
6425 	msg_size = event->msg_len;
6426 
6427 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6428 	    "Got msg %s(%d) from VF-%d of size %d\n",
6429 	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6430 
6431 	switch (opcode) {
6432 	case I40E_VIRTCHNL_OP_VERSION:
6433 		ixl_vf_version_msg(pf, vf, msg, msg_size);
6434 		break;
6435 	case I40E_VIRTCHNL_OP_RESET_VF:
6436 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6437 		break;
6438 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6439 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6440 		break;
6441 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6442 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6443 		break;
6444 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6445 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6446 		break;
6447 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6448 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6449 		break;
6450 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6451 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6452 		break;
6453 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6454 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6455 		break;
6456 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6457 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6458 		break;
6459 	case I40E_VIRTCHNL_OP_ADD_VLAN:
6460 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6461 		break;
6462 	case I40E_VIRTCHNL_OP_DEL_VLAN:
6463 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6464 		break;
6465 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6466 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6467 		break;
6468 	case I40E_VIRTCHNL_OP_GET_STATS:
6469 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6470 		break;
6471 
6472 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6473 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6474 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6475 	default:
6476 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6477 		break;
6478 	}
6479 }
6480 
6481 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
6482 static void
6483 ixl_handle_vflr(void *arg, int pending)
6484 {
6485 	struct ixl_pf *pf;
6486 	struct i40e_hw *hw;
6487 	uint16_t global_vf_num;
6488 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6489 	int i;
6490 
6491 	pf = arg;
6492 	hw = &pf->hw;
6493 
6494 	IXL_PF_LOCK(pf);
6495 	for (i = 0; i < pf->num_vfs; i++) {
6496 		global_vf_num = hw->func_caps.vf_base_id + i;
6497 
6498 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6499 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6500 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6501 		if (vflrstat & vflrstat_mask) {
6502 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6503 			    vflrstat_mask);
6504 
6505 			ixl_reinit_vf(pf, &pf->vfs[i]);
6506 		}
6507 	}
6508 
6509 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6510 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6511 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6512 	ixl_flush(hw);
6513 
6514 	IXL_PF_UNLOCK(pf);
6515 }
6516 
6517 static int
6518 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6519 {
6520 
6521 	switch (err) {
6522 	case I40E_AQ_RC_EPERM:
6523 		return (EPERM);
6524 	case I40E_AQ_RC_ENOENT:
6525 		return (ENOENT);
6526 	case I40E_AQ_RC_ESRCH:
6527 		return (ESRCH);
6528 	case I40E_AQ_RC_EINTR:
6529 		return (EINTR);
6530 	case I40E_AQ_RC_EIO:
6531 		return (EIO);
6532 	case I40E_AQ_RC_ENXIO:
6533 		return (ENXIO);
6534 	case I40E_AQ_RC_E2BIG:
6535 		return (E2BIG);
6536 	case I40E_AQ_RC_EAGAIN:
6537 		return (EAGAIN);
6538 	case I40E_AQ_RC_ENOMEM:
6539 		return (ENOMEM);
6540 	case I40E_AQ_RC_EACCES:
6541 		return (EACCES);
6542 	case I40E_AQ_RC_EFAULT:
6543 		return (EFAULT);
6544 	case I40E_AQ_RC_EBUSY:
6545 		return (EBUSY);
6546 	case I40E_AQ_RC_EEXIST:
6547 		return (EEXIST);
6548 	case I40E_AQ_RC_EINVAL:
6549 		return (EINVAL);
6550 	case I40E_AQ_RC_ENOTTY:
6551 		return (ENOTTY);
6552 	case I40E_AQ_RC_ENOSPC:
6553 		return (ENOSPC);
6554 	case I40E_AQ_RC_ENOSYS:
6555 		return (ENOSYS);
6556 	case I40E_AQ_RC_ERANGE:
6557 		return (ERANGE);
6558 	case I40E_AQ_RC_EFLUSHED:
6559 		return (EINVAL);	/* No exact equivalent in errno.h */
6560 	case I40E_AQ_RC_BAD_ADDR:
6561 		return (EFAULT);
6562 	case I40E_AQ_RC_EMODE:
6563 		return (EPERM);
6564 	case I40E_AQ_RC_EFBIG:
6565 		return (EFBIG);
6566 	default:
6567 		return (EINVAL);
6568 	}
6569 }
6570 
6571 static int
6572 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6573 {
6574 	struct ixl_pf *pf;
6575 	struct i40e_hw *hw;
6576 	struct ixl_vsi *pf_vsi;
6577 	enum i40e_status_code ret;
6578 	int i, error;
6579 
6580 	pf = device_get_softc(dev);
6581 	hw = &pf->hw;
6582 	pf_vsi = &pf->vsi;
6583 
6584 	IXL_PF_LOCK(pf);
6585 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6586 	    M_ZERO);
6587 
6588 	if (pf->vfs == NULL) {
6589 		error = ENOMEM;
6590 		goto fail;
6591 	}
6592 
6593 	for (i = 0; i < num_vfs; i++)
6594 		sysctl_ctx_init(&pf->vfs[i].ctx);
6595 
6596 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6597 	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6598 	if (ret != I40E_SUCCESS) {
6599 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6600 		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6601 		    error);
6602 		goto fail;
6603 	}
6604 
6605 	ixl_configure_msix(pf);
6606 	ixl_enable_adminq(hw);
6607 
6608 	pf->num_vfs = num_vfs;
6609 	IXL_PF_UNLOCK(pf);
6610 	return (0);
6611 
6612 fail:
6613 	free(pf->vfs, M_IXL);
6614 	pf->vfs = NULL;
6615 	IXL_PF_UNLOCK(pf);
6616 	return (error);
6617 }
6618 
6619 static void
6620 ixl_iov_uninit(device_t dev)
6621 {
6622 	struct ixl_pf *pf;
6623 	struct i40e_hw *hw;
6624 	struct ixl_vsi *vsi;
6625 	struct ifnet *ifp;
6626 	struct ixl_vf *vfs;
6627 	int i, num_vfs;
6628 
6629 	pf = device_get_softc(dev);
6630 	hw = &pf->hw;
6631 	vsi = &pf->vsi;
6632 	ifp = vsi->ifp;
6633 
6634 	IXL_PF_LOCK(pf);
6635 	for (i = 0; i < pf->num_vfs; i++) {
6636 		if (pf->vfs[i].vsi.seid != 0)
6637 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6638 	}
6639 
6640 	if (pf->veb_seid != 0) {
6641 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6642 		pf->veb_seid = 0;
6643 	}
6644 
6645 #if __FreeBSD_version > 1100022
6646 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6647 #else
6648 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
6649 #endif
6650 		ixl_disable_intr(vsi);
6651 
6652 	vfs = pf->vfs;
6653 	num_vfs = pf->num_vfs;
6654 
6655 	pf->vfs = NULL;
6656 	pf->num_vfs = 0;
6657 	IXL_PF_UNLOCK(pf);
6658 
6659 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6660 	for (i = 0; i < num_vfs; i++)
6661 		sysctl_ctx_free(&vfs[i].ctx);
6662 	free(vfs, M_IXL);
6663 }
6664 
6665 static int
6666 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6667 {
6668 	char sysctl_name[QUEUE_NAME_LEN];
6669 	struct ixl_pf *pf;
6670 	struct ixl_vf *vf;
6671 	const void *mac;
6672 	size_t size;
6673 	int error;
6674 
6675 	pf = device_get_softc(dev);
6676 	vf = &pf->vfs[vfnum];
6677 
6678 	IXL_PF_LOCK(pf);
6679 	vf->vf_num = vfnum;
6680 
6681 	vf->vsi.back = pf;
6682 	vf->vf_flags = VF_FLAG_ENABLED;
6683 	SLIST_INIT(&vf->vsi.ftl);
6684 
6685 	error = ixl_vf_setup_vsi(pf, vf);
6686 	if (error != 0)
6687 		goto out;
6688 
6689 	if (nvlist_exists_binary(params, "mac-addr")) {
6690 		mac = nvlist_get_binary(params, "mac-addr", &size);
6691 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6692 
6693 		if (nvlist_get_bool(params, "allow-set-mac"))
6694 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6695 	} else
6696 		/*
6697 		 * If the administrator has not specified a MAC address then
6698 		 * we must allow the VF to choose one.
6699 		 */
6700 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6701 
6702 	if (nvlist_get_bool(params, "mac-anti-spoof"))
6703 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6704 
6705 	if (nvlist_get_bool(params, "allow-promisc"))
6706 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6707 
6708 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6709 
6710 	ixl_reset_vf(pf, vf);
6711 out:
6712 	IXL_PF_UNLOCK(pf);
6713 	if (error == 0) {
6714 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6715 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6716 	}
6717 
6718 	return (error);
6719 }
6720 #endif /* PCI_IOV */
6721