/* /freebsd/sys/dev/ixl/if_ixl.c (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352) */
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.3";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select which devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
#ifdef X722_SUPPORT
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
#endif
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static int	ixl_enable_rings(struct ixl_vsi *);
static int	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);
static void	ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void	ixl_free_mac_filters(struct ixl_vsi *vsi);


/* Sysctl debug interface */
#ifdef IXL_DEBUG_SYSCTL
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);
#endif

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_update_vsi_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif

#ifdef PCI_IOV
static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void	ixl_iov_uninit(device_t dev);
static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void	ixl_handle_vf_msg(struct ixl_pf *,
		    struct i40e_arq_event_info *);
static void	ixl_handle_vflr(void *arg, int pending);

static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
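
/*
** Usage note (added; not from the original source): when built as a
** module, the driver can be loaded at boot from loader.conf(5) --
** conventionally via
**   if_ixl_load="YES"
** or by hand with kldload(8). The module name here is assumed to
** follow the usual if_<driver> naming convention.
*/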

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");
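
/*
** Added note: the parameters below are boot-time tunables -- they can be
** set in loader.conf(5) before the driver loads and are then visible
** read-only under hw.ixl.* via sysctl(8). The values here are only
** examples:
**   hw.ixl.enable_msix=1
**   hw.ixl.ringsz=1024
**   hw.ixl.max_queues=0
*/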

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
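
/*
** Added note: IXL_ITR_8K and IXL_ITR_4K name target rates of roughly
** 8000 and 4000 interrupts per second per queue; the value programmed
** into the hardware is an interval (assumed here to use this family's
** usual 2-usec register granularity).
*/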

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
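
	/*
	** Example usage (added; unit number and value are illustrative):
	**   # sysctl dev.ixl.0.fc=3
	** where values 0-3 map to None/Rx/Tx/Full per ixl_fc_string above;
	** the exact accepted range is enforced by ixl_set_flowcntl().
	*/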

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");

	/* Debug shared-code message level */
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Debug Message Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
	    0, "PF/VF Virtual Channel debug level");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");
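
	/*
	** Added note: in short, the checks above accept an NVM API minor
	** level equal to the driver's or one behind it; anything newer
	** than the driver, or more than one minor behind, draws one of
	** the warnings above.
	*/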

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error)
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial switch config failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Limit phy interrupts to link and modules failure */
	error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN |
		I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (error)
		device_printf(dev, "set phy mask failed: %d\n", error);

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0)
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXL_PF_LOCK(pf);
		ixl_stop(pf);
		IXL_PF_UNLOCK(pf);
	}

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
	}

	/* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

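/*
** Helper for SIOCSIFCAP (added comment): keep the TX checksum and TSO
** capability bits consistent, since TSO requires hardware TX
** checksumming. Enabling TSO forces TXCSUM on; disabling TXCSUM also
** disables TSO, with the IXL_FLAGS_KEEP_TSO* flags remembering that
** TSO should be restored once TXCSUM is re-enabled.
*/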
897 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
898 {
899 	device_t 	dev = vsi->dev;
900 
901 	/* Enable/disable TXCSUM/TSO4 */
902 	if (!(ifp->if_capenable & IFCAP_TXCSUM)
903 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
904 		if (mask & IFCAP_TXCSUM) {
905 			ifp->if_capenable |= IFCAP_TXCSUM;
906 			/* enable TXCSUM, restore TSO if previously enabled */
907 			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
908 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
909 				ifp->if_capenable |= IFCAP_TSO4;
910 			}
911 		}
912 		else if (mask & IFCAP_TSO4) {
913 			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
914 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
915 			device_printf(dev,
916 			    "TSO4 requires txcsum, enabling both...\n");
917 		}
918 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
919 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
920 		if (mask & IFCAP_TXCSUM)
921 			ifp->if_capenable &= ~IFCAP_TXCSUM;
922 		else if (mask & IFCAP_TSO4)
923 			ifp->if_capenable |= IFCAP_TSO4;
924 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
925 	    && (ifp->if_capenable & IFCAP_TSO4)) {
926 		if (mask & IFCAP_TXCSUM) {
927 			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
928 			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
929 			device_printf(dev,
930 			    "TSO4 requires txcsum, disabling both...\n");
931 		} else if (mask & IFCAP_TSO4)
932 			ifp->if_capenable &= ~IFCAP_TSO4;
933 	}
934 
935 	/* Enable/disable TXCSUM_IPV6/TSO6 */
936 	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
937 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
938 		if (mask & IFCAP_TXCSUM_IPV6) {
939 			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
940 			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
941 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
942 				ifp->if_capenable |= IFCAP_TSO6;
943 			}
944 		} else if (mask & IFCAP_TSO6) {
945 			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
946 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
947 			device_printf(dev,
948 			    "TSO6 requires txcsum6, enabling both...\n");
949 		}
950 	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
951 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
952 		if (mask & IFCAP_TXCSUM_IPV6)
953 			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
954 		else if (mask & IFCAP_TSO6)
955 			ifp->if_capenable |= IFCAP_TSO6;
956 	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
957 	    && (ifp->if_capenable & IFCAP_TSO6)) {
958 		if (mask & IFCAP_TXCSUM_IPV6) {
959 			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
960 			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
961 			device_printf(dev,
962 			    "TSO6 requires txcsum6, disabling both...\n");
963 		} else if (mask & IFCAP_TSO6)
964 			ifp->if_capenable &= ~IFCAP_TSO6;
965 	}
966 }
967 
968 /*********************************************************************
969  *  Ioctl entry point
970  *
971  *  ixl_ioctl is called when the user wants to configure the
972  *  interface.
973  *
974  *  return 0 on success, positive on failure
975  **********************************************************************/
976 
977 static int
978 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
979 {
980 	struct ixl_vsi	*vsi = ifp->if_softc;
981 	struct ixl_pf	*pf = vsi->back;
982 	struct ifreq	*ifr = (struct ifreq *) data;
983 #if defined(INET) || defined(INET6)
984 	struct ifaddr *ifa = (struct ifaddr *)data;
985 	bool		avoid_reset = FALSE;
986 #endif
987 	int             error = 0;
988 
989 	switch (command) {
990 
991         case SIOCSIFADDR:
992 #ifdef INET
993 		if (ifa->ifa_addr->sa_family == AF_INET)
994 			avoid_reset = TRUE;
995 #endif
996 #ifdef INET6
997 		if (ifa->ifa_addr->sa_family == AF_INET6)
998 			avoid_reset = TRUE;
999 #endif
1000 #if defined(INET) || defined(INET6)
1001 		/*
1002 		** Calling init results in link renegotiation,
1003 		** so we avoid doing it when possible.
1004 		*/
1005 		if (avoid_reset) {
1006 			ifp->if_flags |= IFF_UP;
1007 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1008 				ixl_init(pf);
1009 #ifdef INET
1010 			if (!(ifp->if_flags & IFF_NOARP))
1011 				arp_ifinit(ifp, ifa);
1012 #endif
1013 		} else
1014 			error = ether_ioctl(ifp, command, data);
1015 		break;
1016 #endif
1017 	case SIOCSIFMTU:
1018 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1019 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
1020 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1021 			error = EINVAL;
1022 		} else {
1023 			IXL_PF_LOCK(pf);
1024 			ifp->if_mtu = ifr->ifr_mtu;
1025 			vsi->max_frame_size =
1026 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1027 			    + ETHER_VLAN_ENCAP_LEN;
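			/* For example, a 1500-byte MTU yields a max frame
			 * size of 1500 + 14 + 4 + 4 = 1522 bytes. */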
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: by the stack as the init entry
 *  point in the network interface structure, and by the driver as a
 *  hw/sw initialization routine to get to a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!\n");
			return;
		} else {
			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
			"aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
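/*
** Added comment: deferred (taskqueue) handler for a queue. It cleans the
** RX and TX rings, restarts TX if the stack still has frames buffered,
** and either reschedules itself (more RX work pending) or re-enables the
** queue's interrupt.
*/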
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
        u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
#ifndef IFM_ETH_XTYPE
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_CX;
			break;
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_CX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
#else
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
#endif
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
                return;
	txr->atr_count = 0;
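
	/*
	** Added note: a packet reaching this point has been sampled --
	** every SYN/FIN, plus roughly one in atr_rate (default 20) of
	** the other TCP packets on this ring.
	*/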

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
                multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        if (mcnt == MAX_MULTICAST_ADDR)
                                break;
                        mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
                multi = TRUE;
        if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
1713 	** First just get a count, to decide whether
1714 	** we should simply use multicast promiscuous.
1715 	*/
1716 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1717 		if (ifma->ifma_addr->sa_family != AF_LINK)
1718 			continue;
1719 		mcnt++;
1720 	}
1721 	if_maddr_runlock(ifp);
1722 
1723 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1724 		/* delete existing MC filters */
1725 		ixl_del_hw_filters(vsi, mcnt);
1726 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1727 		    vsi->seid, TRUE, NULL);
1728 		return;
1729 	}
1730 
1731 	mcnt = 0;
1732 	if_maddr_rlock(ifp);
1733 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1734 		if (ifma->ifma_addr->sa_family != AF_LINK)
1735 			continue;
1736 		ixl_add_mc_filter(vsi,
1737 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1738 		mcnt++;
1739 	}
1740 	if_maddr_runlock(ifp);
1741 	if (mcnt > 0) {
1742 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1743 		ixl_add_hw_filters(vsi, flags, mcnt);
1744 	}
1745 
1746 	IOCTL_DEBUGOUT("ixl_add_multi: end");
1747 	return;
1748 }
1749 
1750 static void
1751 ixl_del_multi(struct ixl_vsi *vsi)
1752 {
1753 	struct ifnet		*ifp = vsi->ifp;
1754 	struct ifmultiaddr	*ifma;
1755 	struct ixl_mac_filter	*f;
1756 	int			mcnt = 0;
1757 	bool		match = FALSE;
1758 
1759 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1760 
1761 	/* Search for removed multicast addresses */
1762 	if_maddr_rlock(ifp);
1763 	SLIST_FOREACH(f, &vsi->ftl, next) {
1764 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1765 			match = FALSE;
1766 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1767 				if (ifma->ifma_addr->sa_family != AF_LINK)
1768 					continue;
1769 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1770 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1771 					match = TRUE;
1772 					break;
1773 				}
1774 			}
1775 			if (match == FALSE) {
1776 				f->flags |= IXL_FILTER_DEL;
1777 				mcnt++;
1778 			}
1779 		}
1780 	}
1781 	if_maddr_runlock(ifp);
1782 
1783 	if (mcnt > 0)
1784 		ixl_del_hw_filters(vsi, mcnt);
1785 }
1786 
1787 
1788 /*********************************************************************
1789  *  Timer routine
1790  *
1791  *  This routine checks link status, updates statistics,
1792  *  and runs the watchdog check.
1793  *
1794  **********************************************************************/
1795 
1796 static void
1797 ixl_local_timer(void *arg)
1798 {
1799 	struct ixl_pf		*pf = arg;
1800 	struct i40e_hw		*hw = &pf->hw;
1801 	struct ixl_vsi		*vsi = &pf->vsi;
1802 	struct ixl_queue	*que = vsi->queues;
1803 	device_t		dev = pf->dev;
1804 	int			hung = 0;
1805 	u32			mask;
1806 
1807 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1808 
1809 	/* Fire off the adminq task */
1810 	taskqueue_enqueue(pf->tq, &pf->adminq);
1811 
1812 	/* Update stats */
1813 	ixl_update_stats_counters(pf);
1814 
1815 	/*
1816 	** Check status of the queues
1817 	*/
1818 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1819 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1820 
1821 	for (int i = 0; i < vsi->num_queues; i++,que++) {
1822 		/* Any queues with outstanding work get a sw irq */
1823 		if (que->busy)
1824 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1825 		/*
1826 		** Each time txeof runs without cleaning while
1827 		** uncleaned descriptors remain, it increments
1828 		** busy; once busy reaches IXL_MAX_TX_BUSY we
1829 		** mark the queue hung.
1829 		*/
1830 		if (que->busy == IXL_QUEUE_HUNG) {
1831 			++hung;
1832 			/* Mark the queue as inactive */
1833 			vsi->active_queues &= ~((u64)1 << que->me);
1834 			continue;
1835 		} else {
1836 			/* Check if we've come back from hung */
1837 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1838 				vsi->active_queues |= ((u64)1 << que->me);
1839 		}
1840 		if (que->busy >= IXL_MAX_TX_BUSY) {
1841 #ifdef IXL_DEBUG
1842 			device_printf(dev, "Warning: queue %d "
1843 			    "appears to be hung!\n", i);
1844 #endif
1845 			que->busy = IXL_QUEUE_HUNG;
1846 			++hung;
1847 		}
1848 	}
1849 	/* Only reinit if all queues show hung */
1850 	if (hung == vsi->num_queues)
1851 		goto hung;
1852 
1853 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1854 	return;
1855 
1856 hung:
1857 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1858 	ixl_init_locked(pf);
1859 }
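
/*
 * Editor's sketch (not driver code): the active_queues bookkeeping in
 * the timer above is plain 64-bit bit manipulation, one bit per queue:
 */
#if 0	/* illustration only */
#include <stdint.h>

static void
demo_queue_bits(uint64_t *active, int qid)
{
	*active |= ((uint64_t)1 << qid);	/* mark queue active */
	*active &= ~((uint64_t)1 << qid);	/* mark queue hung */
	(void)((*active >> qid) & 1);		/* test whether active */
}
#endif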
1860 
1861 /*
1862 ** Note: this routine updates the OS on the link state
1863 **	the real check of the hardware only happens with
1864 **	a link interrupt.
1865 */
1866 static void
1867 ixl_update_link_status(struct ixl_pf *pf)
1868 {
1869 	struct ixl_vsi		*vsi = &pf->vsi;
1870 	struct i40e_hw		*hw = &pf->hw;
1871 	struct ifnet		*ifp = vsi->ifp;
1872 	device_t		dev = pf->dev;
1873 
1874 	if (pf->link_up) {
1875 		if (vsi->link_active == FALSE) {
1876 			pf->fc = hw->fc.current_mode;
1877 			if (bootverbose) {
1878 				device_printf(dev,"Link is up %d Gbps %s,"
1879 				    " Flow Control: %s\n",
1880 				    ((pf->link_speed ==
1881 				    I40E_LINK_SPEED_40GB)? 40:10),
1882 				    "Full Duplex", ixl_fc_string[pf->fc]);
1883 			}
1884 			vsi->link_active = TRUE;
1885 			/*
1886 			** Warn user if link speed on NPAR enabled
1887 			** partition is not at least 10GB
1888 			*/
1889 			if (hw->func_caps.npar_enable &&
1890 			   (hw->phy.link_info.link_speed ==
1891 			   I40E_LINK_SPEED_1GB ||
1892 			   hw->phy.link_info.link_speed ==
1893 			   I40E_LINK_SPEED_100MB))
1894 				device_printf(dev, "The partition detected"
1895 				    "link speed that is less than 10Gbps\n");
1896 			if_link_state_change(ifp, LINK_STATE_UP);
1897 		}
1898 	} else { /* Link down */
1899 		if (vsi->link_active == TRUE) {
1900 			if (bootverbose)
1901 				device_printf(dev,"Link is Down\n");
1902 			if_link_state_change(ifp, LINK_STATE_DOWN);
1903 			vsi->link_active = FALSE;
1904 		}
1905 	}
1906 
1907 	return;
1908 }
1909 
1910 /*********************************************************************
1911  *
1912  *  This routine stops all traffic on the adapter by disabling
1913  *  interrupts and the RX/TX rings, then stops the local timer.
1914  *
1915  **********************************************************************/
1916 
1917 static void
1918 ixl_stop(struct ixl_pf *pf)
1919 {
1920 	struct ixl_vsi	*vsi = &pf->vsi;
1921 	struct ifnet	*ifp = vsi->ifp;
1922 
1923 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1924 
1925 	INIT_DEBUGOUT("ixl_stop: begin\n");
1926 	if (pf->num_vfs == 0)
1927 		ixl_disable_intr(vsi);
1928 	else
1929 		ixl_disable_rings_intr(vsi);
1930 	ixl_disable_rings(vsi);
1931 
1932 	/* Tell the stack that the interface is no longer active */
1933 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1934 
1935 	/* Stop the local timer */
1936 	callout_stop(&pf->timer);
1937 
1938 	return;
1939 }
1940 
1941 
1942 /*********************************************************************
1943  *
1944  *  Setup Legacy or MSI interrupt resources and handler for the PF
1945  *
1946  **********************************************************************/
1947 static int
1948 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1949 {
1950 	device_t        dev = pf->dev;
1951 	struct 		ixl_vsi *vsi = &pf->vsi;
1952 	struct		ixl_queue *que = vsi->queues;
1953 	int 		error, rid = 0;
1954 
1955 	if (pf->msix == 1)
1956 		rid = 1;
1957 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1958 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1959 	if (pf->res == NULL) {
1960 		device_printf(dev,"Unable to allocate"
1961 		    " bus resource: vsi legacy/msi interrupt\n");
1962 		return (ENXIO);
1963 	}
1964 
1965 	/* Set the handler function */
1966 	error = bus_setup_intr(dev, pf->res,
1967 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1968 	    ixl_intr, pf, &pf->tag);
1969 	if (error) {
1970 		pf->res = NULL;
1971 		device_printf(dev, "Failed to register legacy/msi handler");
1972 		return (error);
1973 	}
1974 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1975 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1976 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1977 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1978 	    taskqueue_thread_enqueue, &que->tq);
1979 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1980 	    device_get_nameunit(dev));
1981 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1982 
1983 #ifdef PCI_IOV
1984 	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1985 #endif
1986 
1987 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1988 	    taskqueue_thread_enqueue, &pf->tq);
1989 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1990 	    device_get_nameunit(dev));
1991 
1992 	return (0);
1993 }
1994 
1995 
1996 /*********************************************************************
1997  *
1998  *  Setup MSIX Interrupt resources and handlers for the VSI
1999  *
2000  **********************************************************************/
2001 static int
2002 ixl_assign_vsi_msix(struct ixl_pf *pf)
2003 {
2004 	device_t	dev = pf->dev;
2005 	struct 		ixl_vsi *vsi = &pf->vsi;
2006 	struct 		ixl_queue *que = vsi->queues;
2007 	struct		tx_ring	 *txr;
2008 	int 		error, rid, vector = 0;
2009 #ifdef	RSS
2010 	cpuset_t cpu_mask;
2011 #endif
2012 
2013 	/* Admin Queue is vector 0 */
2014 	rid = vector + 1;
2015 	pf->res = bus_alloc_resource_any(dev,
2016     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2017 	if (!pf->res) {
2018 		device_printf(dev,"Unable to allocate"
2019     	    " bus resource: Adminq interrupt [%d]\n", rid);
2020 		return (ENXIO);
2021 	}
2022 	/* Set the adminq vector and handler */
2023 	error = bus_setup_intr(dev, pf->res,
2024 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2025 	    ixl_msix_adminq, pf, &pf->tag);
2026 	if (error) {
2027 		pf->res = NULL;
2028 		device_printf(dev, "Failed to register Admin que handler");
2029 		return (error);
2030 	}
2031 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2032 	pf->admvec = vector;
2033 	/* Tasklet for Admin Queue */
2034 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2035 
2036 #ifdef PCI_IOV
2037 	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2038 #endif
2039 
2040 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2041 	    taskqueue_thread_enqueue, &pf->tq);
2042 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2043 	    device_get_nameunit(pf->dev));
2044 	++vector;
2045 
2046 	/* Now set up the stations */
2047 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2048 		int cpu_id = i;
2049 		rid = vector + 1;
2050 		txr = &que->txr;
2051 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2052 		    RF_SHAREABLE | RF_ACTIVE);
2053 		if (que->res == NULL) {
2054 			device_printf(dev,"Unable to allocate"
2055 		    	    " bus resource: que interrupt [%d]\n", vector);
2056 			return (ENXIO);
2057 		}
2058 		/* Set the handler function */
2059 		error = bus_setup_intr(dev, que->res,
2060 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2061 		    ixl_msix_que, que, &que->tag);
2062 		if (error) {
2063 			que->res = NULL;
2064 			device_printf(dev, "Failed to register que handler");
2065 			return (error);
2066 		}
2067 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2068 		/* Bind the vector to a CPU */
2069 #ifdef RSS
2070 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2071 #endif
2072 		bus_bind_intr(dev, que->res, cpu_id);
2073 		que->msix = vector;
2074 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2075 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2076 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2077 		    taskqueue_thread_enqueue, &que->tq);
2078 #ifdef RSS
2079 		CPU_SETOF(cpu_id, &cpu_mask);
2080 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2081 		    &cpu_mask, "%s (bucket %d)",
2082 		    device_get_nameunit(dev), cpu_id);
2083 #else
2084 		taskqueue_start_threads(&que->tq, 1, PI_NET,
2085 		    "%s que", device_get_nameunit(dev));
2086 #endif
2087 	}
2088 
2089 	return (0);
2090 }
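
/*
 * Editor's note (sketch, not driver code): the vector/rid bookkeeping
 * above gives the admin queue MSI-X vector 0 (bus rid 1) and queue i
 * vector i + 1 (bus rid i + 2), since bus rids are one-based:
 */
#if 0	/* illustration only */
static void
demo_vector_layout(int num_queues)
{
	int adminq_vector = 0;
	int adminq_rid = adminq_vector + 1;	/* rid 1 */

	for (int i = 0; i < num_queues; i++) {
		int vector = i + 1;	/* vectors 1 .. num_queues */
		int rid = vector + 1;	/* rids 2 .. num_queues + 1 */
		(void)rid;
	}
	(void)adminq_rid;
}
#endif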
2091 
2092 
2093 /*
2094  * Allocate MSI/X vectors
2095  */
2096 static int
2097 ixl_init_msix(struct ixl_pf *pf)
2098 {
2099 	device_t dev = pf->dev;
2100 	int rid, want, vectors, queues, available;
2101 
2102 	/* Override by tuneable */
2103 	if (ixl_enable_msix == 0)
2104 		goto msi;
2105 
2106 	/*
2107 	** When used in a virtualized environment,
2108 	** the PCI BUSMASTER capability may not be set,
2109 	** so explicitly set it here and rewrite
2110 	** the ENABLE bit in the MSIX control register
2111 	** at this point so the host can
2112 	** initialize us successfully.
2113 	*/
2114 	{
2115 		u16 pci_cmd_word;
2116 		int msix_ctrl;
2117 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2118 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2119 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2120 		pci_find_cap(dev, PCIY_MSIX, &rid);
2121 		rid += PCIR_MSIX_CTRL;
2122 		msix_ctrl = pci_read_config(dev, rid, 2);
2123 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2124 		pci_write_config(dev, rid, msix_ctrl, 2);
2125 	}
2126 
2127 	/* First try MSI/X */
2128 	rid = PCIR_BAR(IXL_BAR);
2129 	pf->msix_mem = bus_alloc_resource_any(dev,
2130 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2131 	if (!pf->msix_mem) {
2132 		/* May not be enabled */
2133 		device_printf(pf->dev,
2134 		    "Unable to map MSIX table\n");
2135 		goto msi;
2136 	}
2137 
2138 	available = pci_msix_count(dev);
2139 	if (available == 0) { /* system has msix disabled */
2140 		bus_release_resource(dev, SYS_RES_MEMORY,
2141 		    rid, pf->msix_mem);
2142 		pf->msix_mem = NULL;
2143 		goto msi;
2144 	}
2145 
2146 	/* Figure out a reasonable auto config value */
2147 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2148 
2149 	/* Override with hardcoded value if sane */
2150 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2151 		queues = ixl_max_queues;
2152 
2153 #ifdef  RSS
2154 	/* If we're doing RSS, clamp at the number of RSS buckets */
2155 	if (queues > rss_getnumbuckets())
2156 		queues = rss_getnumbuckets();
2157 #endif
2158 
2159 	/*
2160 	** Want one vector (RX/TX pair) per queue
2161 	** plus an additional for the admin queue.
2162 	*/
2163 	want = queues + 1;
2164 	if (want <= available)	/* Have enough */
2165 		vectors = want;
2166 	else {
2167 		device_printf(pf->dev,
2168 		    "MSIX Configuration Problem, "
2169 		    "%d vectors available but %d wanted!\n",
2170 		    available, want);
2171 		return (0); /* Will go to Legacy setup */
2172 	}
2173 
2174 	if (pci_alloc_msix(dev, &vectors) == 0) {
2175 		device_printf(pf->dev,
2176 		    "Using MSIX interrupts with %d vectors\n", vectors);
2177 		pf->msix = vectors;
2178 		pf->vsi.num_queues = queues;
2179 #ifdef RSS
2180 		/*
2181 		 * If we're doing RSS, the number of queues needs to
2182 		 * match the number of RSS buckets that are configured.
2183 		 *
2184 		 * + If there's more queues than RSS buckets, we'll end
2185 		 *   up with queues that get no traffic.
2186 		 *
2187 		 * + If there's more RSS buckets than queues, we'll end
2188 		 *   up having multiple RSS buckets map to the same queue,
2189 		 *   so there'll be some contention.
2190 		 */
2191 		if (queues != rss_getnumbuckets()) {
2192 			device_printf(dev,
2193 			    "%s: queues (%d) != RSS buckets (%d)"
2194 			    "; performance will be impacted.\n",
2195 			    __func__, queues, rss_getnumbuckets());
2196 		}
2197 #endif
2198 		return (vectors);
2199 	}
2200 msi:
2201 	vectors = pci_msi_count(dev);
2202 	pf->vsi.num_queues = 1;
2203 	pf->msix = 1;
2204 	ixl_max_queues = 1;
2205 	ixl_enable_msix = 0;
2206 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2207 		device_printf(pf->dev, "Using an MSI interrupt\n");
2208 	else {
2209 		pf->msix = 0;
2210 		device_printf(pf->dev, "Using a Legacy interrupt\n");
2211 	}
2212 	return (vectors);
2213 }
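
/*
 * Editor's sketch (not driver code): the auto-configuration above boils
 * down to "one vector per queue pair plus one for the admin queue",
 * with the queue count clamped by CPUs, available vectors, and the
 * ixl_max_queues tunable:
 */
#if 0	/* illustration only */
static int
demo_msix_queue_count(int ncpus, int available, int tunable_max)
{
	int queues = (ncpus > (available - 1)) ? (available - 1) : ncpus;

	if (tunable_max != 0 && tunable_max <= queues)
		queues = tunable_max;
	return (queues);	/* driver then wants queues + 1 vectors */
}
#endif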
2214 
2215 
2216 /*
2217  * Plumb MSI/X vectors
2218  */
2219 static void
2220 ixl_configure_msix(struct ixl_pf *pf)
2221 {
2222 	struct i40e_hw	*hw = &pf->hw;
2223 	struct ixl_vsi *vsi = &pf->vsi;
2224 	u32		reg;
2225 	u16		vector = 1;
2226 
2227 	/* First set up the adminq - vector 0 */
2228 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2229 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2230 
2231 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2232 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2233 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2234 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2235 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2236 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2237 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2238 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2239 
2240 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2241 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2242 
2243 	wr32(hw, I40E_PFINT_DYN_CTL0,
2244 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2245 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2246 
2247 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2248 
2249 	/* Next configure the queues */
2250 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2251 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2252 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2253 
2254 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2255 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2256 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2257 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2258 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2259 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2260 
2261 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2262 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2263 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2264 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2265 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2266 		if (i == (vsi->num_queues - 1))
2267 			reg |= (IXL_QUEUE_EOL
2268 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2269 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2270 	}
2271 }
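
/*
 * Editor's sketch (not driver code): the loop above links the interrupt
 * causes on each vector as RX[i] -> TX[i] -> RX[i+1], with the last TX
 * queue pointing at the end-of-list marker instead:
 */
#if 0	/* illustration only */
#include <stdio.h>

static void
demo_print_cause_chain(int num_queues)
{
	for (int i = 0; i < num_queues; i++) {
		if (i == num_queues - 1)
			printf("RX%d -> TX%d -> EOL\n", i, i);
		else
			printf("RX%d -> TX%d -> RX%d\n", i, i, i + 1);
	}
}
#endif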
2272 
2273 /*
2274  * Configure for MSI single vector operation
2275  */
2276 static void
2277 ixl_configure_legacy(struct ixl_pf *pf)
2278 {
2279 	struct i40e_hw	*hw = &pf->hw;
2280 	u32		reg;
2281 
2282 
2283 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2284 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2285 
2286 
2287 	/* Setup "other" causes */
2288 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2289 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2290 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2291 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2292 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2293 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2294 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2295 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2296 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2297 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2298 	    ;
2299 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2300 
2301 	/* SW_ITR_IDX = 0, but don't change INTENA */
2302 	wr32(hw, I40E_PFINT_DYN_CTL0,
2303 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2304 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2305 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2306 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2307 
2308 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2309 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2310 
2311 	/* Associate the queue pair to the vector and enable the q int */
2312 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2313 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2314 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2315 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2316 
2317 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2318 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2319 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2320 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2321 
2322 	/* Next enable the queue pair */
2323 	reg = rd32(hw, I40E_QTX_ENA(0));
2324 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2325 	wr32(hw, I40E_QTX_ENA(0), reg);
2326 
2327 	reg = rd32(hw, I40E_QRX_ENA(0));
2328 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2329 	wr32(hw, I40E_QRX_ENA(0), reg);
2330 }
2331 
2332 
2333 /*
2334  * Set the Initial ITR state
2335  */
2336 static void
2337 ixl_configure_itr(struct ixl_pf *pf)
2338 {
2339 	struct i40e_hw		*hw = &pf->hw;
2340 	struct ixl_vsi		*vsi = &pf->vsi;
2341 	struct ixl_queue	*que = vsi->queues;
2342 
2343 	vsi->rx_itr_setting = ixl_rx_itr;
2344 	if (ixl_dynamic_rx_itr)
2345 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2346 	vsi->tx_itr_setting = ixl_tx_itr;
2347 	if (ixl_dynamic_tx_itr)
2348 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2349 
2350 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2351 		struct tx_ring	*txr = &que->txr;
2352 		struct rx_ring 	*rxr = &que->rxr;
2353 
2354 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2355 		    vsi->rx_itr_setting);
2356 		rxr->itr = vsi->rx_itr_setting;
2357 		rxr->latency = IXL_AVE_LATENCY;
2358 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2359 		    vsi->tx_itr_setting);
2360 		txr->itr = vsi->tx_itr_setting;
2361 		txr->latency = IXL_AVE_LATENCY;
2362 	}
2363 }
2364 
2365 
2366 static int
2367 ixl_allocate_pci_resources(struct ixl_pf *pf)
2368 {
2369 	int             rid;
2370 	device_t        dev = pf->dev;
2371 
2372 	rid = PCIR_BAR(0);
2373 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2374 	    &rid, RF_ACTIVE);
2375 
2376 	if (!(pf->pci_mem)) {
2377 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2378 		return (ENXIO);
2379 	}
2380 
2381 	pf->osdep.mem_bus_space_tag =
2382 		rman_get_bustag(pf->pci_mem);
2383 	pf->osdep.mem_bus_space_handle =
2384 		rman_get_bushandle(pf->pci_mem);
2385 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2386 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2387 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2388 
2389 	pf->hw.back = &pf->osdep;
2390 
2391 	/*
2392 	** Now set up MSI or MSI-X; this should
2393 	** return the number of supported
2394 	** vectors (it will be 1 for MSI).
2395 	*/
2396 	pf->msix = ixl_init_msix(pf);
2397 	return (0);
2398 }
2399 
2400 static void
2401 ixl_free_pci_resources(struct ixl_pf * pf)
2402 {
2403 	struct ixl_vsi		*vsi = &pf->vsi;
2404 	struct ixl_queue	*que = vsi->queues;
2405 	device_t		dev = pf->dev;
2406 	int			rid, memrid;
2407 
2408 	memrid = PCIR_BAR(IXL_BAR);
2409 
2410 	/* We may get here before stations are setup */
2411 	if ((!ixl_enable_msix) || (que == NULL))
2412 		goto early;
2413 
2414 	/*
2415 	**  Release all msix VSI resources:
2416 	*/
2417 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2418 		rid = que->msix + 1;
2419 		if (que->tag != NULL) {
2420 			bus_teardown_intr(dev, que->res, que->tag);
2421 			que->tag = NULL;
2422 		}
2423 		if (que->res != NULL)
2424 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2425 	}
2426 
2427 early:
2428 	/* Clean the AdminQ interrupt last */
2429 	if (pf->admvec) /* we are doing MSIX */
2430 		rid = pf->admvec + 1;
2431 	else
2432 		rid = (pf->msix != 0) ? 1 : 0;
2433 
2434 	if (pf->tag != NULL) {
2435 		bus_teardown_intr(dev, pf->res, pf->tag);
2436 		pf->tag = NULL;
2437 	}
2438 	if (pf->res != NULL)
2439 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2440 
2441 	if (pf->msix)
2442 		pci_release_msi(dev);
2443 
2444 	if (pf->msix_mem != NULL)
2445 		bus_release_resource(dev, SYS_RES_MEMORY,
2446 		    memrid, pf->msix_mem);
2447 
2448 	if (pf->pci_mem != NULL)
2449 		bus_release_resource(dev, SYS_RES_MEMORY,
2450 		    PCIR_BAR(0), pf->pci_mem);
2451 
2452 	return;
2453 }
2454 
2455 static void
2456 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2457 {
2458 	/* Display supported media types */
2459 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2460 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2461 
2462 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2463 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2464 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2465 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2466 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2467 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2468 
2469 	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2470 	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2471 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2472 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2473 
2474 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2475 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2476 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2477 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2478 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2479 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2480 
2481 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2482 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2483 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2484 	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2485 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2486 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2487 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2488 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2489 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2490 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2491 
2492 #ifndef IFM_ETH_XTYPE
2493 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2494 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2495 
2496 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2497 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2498 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2499 	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2500 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2501 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2502 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2503 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2504 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2505 
2506 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2507 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2508 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2509 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2510 #else
2511 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2512 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2513 
2514 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2515 	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2516 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2517 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2518 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2519 	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2520 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2521 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2522 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2523 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2524 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2525 
2526 	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2527 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2528 
2529 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2530 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2531 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2532 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2533 #endif
2534 }
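
/*
 * Editor's sketch (not driver code): the chain of tests above maps PHY
 * type bits to media words; the same logic can be written as a table
 * walk (the real code stays open-coded because several PHY bits share
 * one media word):
 */
#if 0	/* illustration only */
#include <stdint.h>

struct demo_media_map {
	int	phy_bit;	/* an I40E_PHY_TYPE_* bit position */
	int	media;		/* the matching IFM_* media word */
};

static void
demo_add_media(uint64_t phy_type, const struct demo_media_map *map, int n)
{
	for (int i = 0; i < n; i++)
		if (phy_type & ((uint64_t)1 << map[i].phy_bit))
			;	/* ifmedia_add(..., map[i].media, 0, NULL) */
}
#endif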
2535 
2536 /*********************************************************************
2537  *
2538  *  Setup networking device structure and register an interface.
2539  *
2540  **********************************************************************/
2541 static int
2542 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2543 {
2544 	struct ifnet		*ifp;
2545 	struct i40e_hw		*hw = vsi->hw;
2546 	struct ixl_queue	*que = vsi->queues;
2547 	struct i40e_aq_get_phy_abilities_resp abilities;
2548 	enum i40e_status_code aq_error = 0;
2549 
2550 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2551 
2552 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2553 	if (ifp == NULL) {
2554 		device_printf(dev, "can not allocate ifnet structure\n");
2555 		return (-1);
2556 	}
2557 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2558 	ifp->if_mtu = ETHERMTU;
2559 	ifp->if_baudrate = IF_Gbps(40);
2560 	ifp->if_init = ixl_init;
2561 	ifp->if_softc = vsi;
2562 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2563 	ifp->if_ioctl = ixl_ioctl;
2564 
2565 #if __FreeBSD_version >= 1100036
2566 	if_setgetcounterfn(ifp, ixl_get_counter);
2567 #endif
2568 
2569 	ifp->if_transmit = ixl_mq_start;
2570 
2571 	ifp->if_qflush = ixl_qflush;
2572 
2573 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2574 
2575 	vsi->max_frame_size =
2576 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2577 	    + ETHER_VLAN_ENCAP_LEN;
2578 
2579 	/*
2580 	 * Tell the upper layer(s) we support long frames.
2581 	 */
2582 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2583 
2584 	ifp->if_capabilities |= IFCAP_HWCSUM;
2585 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2586 	ifp->if_capabilities |= IFCAP_TSO;
2587 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2588 	ifp->if_capabilities |= IFCAP_LRO;
2589 
2590 	/* VLAN capabilities */
2591 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2592 			     |  IFCAP_VLAN_HWTSO
2593 			     |  IFCAP_VLAN_MTU
2594 			     |  IFCAP_VLAN_HWCSUM;
2595 	ifp->if_capenable = ifp->if_capabilities;
2596 
2597 	/*
2598 	** Don't turn this on by default: if vlans are
2599 	** created on another pseudo device (e.g. lagg),
2600 	** vlan events are not passed through, breaking
2601 	** operation; with HW FILTER off it works. If
2602 	** you use vlans directly on the ixl driver you can
2603 	** enable this and get full hardware tag filtering.
2604 	*/
2605 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2606 
2607 	/*
2608 	 * Specify the media types supported by this adapter and register
2609 	 * callbacks to update media and link information
2610 	 */
2611 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2612 		     ixl_media_status);
2613 
2614 	aq_error = i40e_aq_get_phy_capabilities(hw,
2615 	    FALSE, TRUE, &abilities, NULL);
2616 	/* May need delay to detect fiber correctly */
2617 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2618 		i40e_msec_delay(200);
2619 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2620 		    TRUE, &abilities, NULL);
2621 	}
2622 	if (aq_error) {
2623 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2624 			device_printf(dev, "Unknown PHY type detected!\n");
2625 		else
2626 			device_printf(dev,
2627 			    "Error getting supported media types, err %d,"
2628 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2629 		return (0);
2630 	}
2631 
2632 	ixl_add_ifmedia(vsi, abilities.phy_type);
2633 
2634 	/* Use autoselect media by default */
2635 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2636 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2637 
2638 	ether_ifattach(ifp, hw->mac.addr);
2639 
2640 	return (0);
2641 }
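
/*
 * Editor's note: with the default 1500-byte MTU, the max_frame_size
 * computed above is 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN
 * tag) = 1522 bytes.  As a stand-alone sketch:
 */
#if 0	/* illustration only */
static int
demo_max_frame_size(int mtu)
{
	return (mtu + 14 + 4 + 4);	/* ETHER_HDR_LEN + CRC + VLAN */
}
#endif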
2642 
2643 /*
2644 ** Run when the Admin Queue gets a
2645 ** link transition interrupt.
2646 */
2647 static void
2648 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2649 {
2650 	struct i40e_hw	*hw = &pf->hw;
2651 	struct i40e_aqc_get_link_status *status =
2652 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2653 	bool check;
2654 
2655 	hw->phy.get_link_info = TRUE;
2656 	i40e_get_link_status(hw, &check);
2657 	pf->link_up = check;
2658 #ifdef IXL_DEBUG
2659 	printf("Link is %s\n", check ? "up":"down");
2660 #endif
2661 	/* Report if Unqualified modules are found */
2662 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2663 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2664 	    (!(status->link_info & I40E_AQ_LINK_UP)))
2665 		device_printf(pf->dev, "Link failed because "
2666 		    "an unqualified module was detected\n");
2667 
2668 	return;
2669 }
2670 
2671 /*********************************************************************
2672  *
2673  *  Get Firmware Switch configuration
2674  *	- this will need to be more robust when more complex
2675  *	  switch configurations are enabled.
2676  *
2677  **********************************************************************/
2678 static int
2679 ixl_switch_config(struct ixl_pf *pf)
2680 {
2681 	struct i40e_hw	*hw = &pf->hw;
2682 	struct ixl_vsi	*vsi = &pf->vsi;
2683 	device_t 	dev = vsi->dev;
2684 	struct i40e_aqc_get_switch_config_resp *sw_config;
2685 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2686 	int	ret;
2687 	u16	next = 0;
2688 
2689 	memset(&aq_buf, 0, sizeof(aq_buf));
2690 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2691 	ret = i40e_aq_get_switch_config(hw, sw_config,
2692 	    sizeof(aq_buf), &next, NULL);
2693 	if (ret) {
2694 		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2695 		    ret);
2696 		return (ret);
2697 	}
2698 #ifdef IXL_DEBUG
2699 	device_printf(dev,
2700 	    "Switch config: header reported: %d in structure, %d total\n",
2701     	    sw_config->header.num_reported, sw_config->header.num_total);
2702 	for (int i = 0; i < sw_config->header.num_reported; i++) {
2703 		device_printf(dev,
2704 		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2705 		    sw_config->element[i].element_type,
2706 		    sw_config->element[i].seid,
2707 		    sw_config->element[i].uplink_seid,
2708 		    sw_config->element[i].downlink_seid);
2709 	}
2710 #endif
2711 	/* Simplified due to a single VSI at the moment */
2712 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2713 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2714 	vsi->seid = sw_config->element[0].seid;
2715 	return (ret);
2716 }
2717 
2718 /*********************************************************************
2719  *
2720  *  Initialize the VSI:  this handles contexts, which means things
2721  *  			 like the number of descriptors and buffer size;
2722  *			 the rings are also initialized here.
2723  *
2724  **********************************************************************/
2725 static int
2726 ixl_initialize_vsi(struct ixl_vsi *vsi)
2727 {
2728 	struct ixl_pf		*pf = vsi->back;
2729 	struct ixl_queue	*que = vsi->queues;
2730 	device_t		dev = vsi->dev;
2731 	struct i40e_hw		*hw = vsi->hw;
2732 	struct i40e_vsi_context	ctxt;
2733 	int			err = 0;
2734 
2735 	memset(&ctxt, 0, sizeof(ctxt));
2736 	ctxt.seid = vsi->seid;
2737 	if (pf->veb_seid != 0)
2738 		ctxt.uplink_seid = pf->veb_seid;
2739 	ctxt.pf_num = hw->pf_id;
2740 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2741 	if (err) {
2742 		device_printf(dev,"get vsi params failed %x!!\n", err);
2743 		return (err);
2744 	}
2745 #ifdef IXL_DEBUG
2746 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2747 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2748 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2749 	    ctxt.uplink_seid, ctxt.vsi_number,
2750 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2751 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2752 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2753 #endif
2754 	/*
2755 	** Set the queue and traffic class bits
2756 	**  - when multiple traffic classes are supported
2757 	**    this will need to be more robust.
2758 	*/
2759 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2760 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2761 	ctxt.info.queue_mapping[0] = 0;
2762 	ctxt.info.tc_mapping[0] = 0x0800;
2763 
2764 	/* Set VLAN receive stripping mode */
2765 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2766 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2767 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2768 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2769 	else
2770 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2771 
2772 	/* Keep copy of VSI info in VSI for statistic counters */
2773 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2774 
2775 	/* Reset VSI statistics */
2776 	ixl_vsi_reset_stats(vsi);
2777 	vsi->hw_filters_add = 0;
2778 	vsi->hw_filters_del = 0;
2779 
2780 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2781 
2782 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2783 	if (err) {
2784 		device_printf(dev,"update vsi params failed %x!!\n",
2785 		   hw->aq.asq_last_status);
2786 		return (err);
2787 	}
2788 
2789 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2790 		struct tx_ring		*txr = &que->txr;
2791 		struct rx_ring 		*rxr = &que->rxr;
2792 		struct i40e_hmc_obj_txq tctx;
2793 		struct i40e_hmc_obj_rxq rctx;
2794 		u32			txctl;
2795 		u16			size;
2796 
2797 
2798 		/* Setup the HMC TX Context  */
2799 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2800 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2801 		tctx.new_context = 1;
2802 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2803 		tctx.qlen = que->num_desc;
2804 		tctx.fc_ena = 0;
2805 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2806 		/* Enable HEAD writeback */
2807 		tctx.head_wb_ena = 1;
2808 		tctx.head_wb_addr = txr->dma.pa +
2809 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2810 		tctx.rdylist_act = 0;
2811 		err = i40e_clear_lan_tx_queue_context(hw, i);
2812 		if (err) {
2813 			device_printf(dev, "Unable to clear TX context\n");
2814 			break;
2815 		}
2816 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2817 		if (err) {
2818 			device_printf(dev, "Unable to set TX context\n");
2819 			break;
2820 		}
2821 		/* Associate the ring with this PF */
2822 		txctl = I40E_QTX_CTL_PF_QUEUE;
2823 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2824 		    I40E_QTX_CTL_PF_INDX_MASK);
2825 		wr32(hw, I40E_QTX_CTL(i), txctl);
2826 		ixl_flush(hw);
2827 
2828 		/* Do ring (re)init */
2829 		ixl_init_tx_ring(que);
2830 
2831 		/* Next setup the HMC RX Context  */
2832 		if (vsi->max_frame_size <= MCLBYTES)
2833 			rxr->mbuf_sz = MCLBYTES;
2834 		else
2835 			rxr->mbuf_sz = MJUMPAGESIZE;
2836 
2837 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2838 
2839 		/* Set up an RX context for the HMC */
2840 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2841 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2842 		/* ignore header split for now */
2843 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2844 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2845 		    vsi->max_frame_size : max_rxmax;
2846 		rctx.dtype = 0;
2847 		rctx.dsize = 1;	/* do 32byte descriptors */
2848 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2849 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2850 		rctx.qlen = que->num_desc;
2851 		rctx.tphrdesc_ena = 1;
2852 		rctx.tphwdesc_ena = 1;
2853 		rctx.tphdata_ena = 0;
2854 		rctx.tphhead_ena = 0;
2855 		rctx.lrxqthresh = 2;
2856 		rctx.crcstrip = 1;
2857 		rctx.l2tsel = 1;
2858 		rctx.showiv = 1;
2859 		rctx.fc_ena = 0;
2860 		rctx.prefena = 1;
2861 
2862 		err = i40e_clear_lan_rx_queue_context(hw, i);
2863 		if (err) {
2864 			device_printf(dev,
2865 			    "Unable to clear RX context %d\n", i);
2866 			break;
2867 		}
2868 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2869 		if (err) {
2870 			device_printf(dev, "Unable to set RX context %d\n", i);
2871 			break;
2872 		}
2873 		err = ixl_init_rx_ring(que);
2874 		if (err) {
2875 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2876 			break;
2877 		}
2878 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2879 #ifdef DEV_NETMAP
2880 		/* preserve queue */
2881 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2882 			struct netmap_adapter *na = NA(vsi->ifp);
2883 			struct netmap_kring *kring = &na->rx_rings[i];
2884 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2885 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2886 		} else
2887 #endif /* DEV_NETMAP */
2888 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2889 	}
2890 	return (err);
2891 }
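
/*
 * Editor's sketch (not driver code): rctx.rxmax above is the receive
 * frame limit, clamped to what one buffer chain can hold (buffer size
 * times the chain length the hardware reports):
 */
#if 0	/* illustration only */
static unsigned int
demo_rxmax(unsigned int max_frame, unsigned int mbuf_sz,
    unsigned int chain_len)
{
	unsigned int cap = mbuf_sz * chain_len;

	return ((max_frame < cap) ? max_frame : cap);
}
#endif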
2892 
2893 
2894 /*********************************************************************
2895  *
2896  *  Free all VSI structs.
2897  *
2898  **********************************************************************/
2899 void
2900 ixl_free_vsi(struct ixl_vsi *vsi)
2901 {
2902 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2903 	struct ixl_queue	*que = vsi->queues;
2904 
2905 	/* Free station queues */
2906 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2907 		struct tx_ring *txr = &que->txr;
2908 		struct rx_ring *rxr = &que->rxr;
2909 
2910 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2911 			continue;
2912 		IXL_TX_LOCK(txr);
2913 		ixl_free_que_tx(que);
2914 		if (txr->base)
2915 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2916 		IXL_TX_UNLOCK(txr);
2917 		IXL_TX_LOCK_DESTROY(txr);
2918 
2919 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2920 			continue;
2921 		IXL_RX_LOCK(rxr);
2922 		ixl_free_que_rx(que);
2923 		if (rxr->base)
2924 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2925 		IXL_RX_UNLOCK(rxr);
2926 		IXL_RX_LOCK_DESTROY(rxr);
2927 
2928 	}
2929 	free(vsi->queues, M_DEVBUF);
2930 
2931 	/* Free VSI filter list */
2932 	ixl_free_mac_filters(vsi);
2933 }
2934 
2935 static void
2936 ixl_free_mac_filters(struct ixl_vsi *vsi)
2937 {
2938 	struct ixl_mac_filter *f;
2939 
2940 	while (!SLIST_EMPTY(&vsi->ftl)) {
2941 		f = SLIST_FIRST(&vsi->ftl);
2942 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2943 		free(f, M_DEVBUF);
2944 	}
2945 }
2946 
2947 
2948 /*********************************************************************
2949  *
2950  *  Allocate memory for the VSI (virtual station interface) and their
2951  *  associated queues, rings and the descriptors associated with each,
2952  *  called only once at attach.
2953  *
2954  **********************************************************************/
2955 static int
2956 ixl_setup_stations(struct ixl_pf *pf)
2957 {
2958 	device_t		dev = pf->dev;
2959 	struct ixl_vsi		*vsi;
2960 	struct ixl_queue	*que;
2961 	struct tx_ring		*txr;
2962 	struct rx_ring		*rxr;
2963 	int 			rsize, tsize;
2964 	int			error = I40E_SUCCESS;
2965 
2966 	vsi = &pf->vsi;
2967 	vsi->back = (void *)pf;
2968 	vsi->hw = &pf->hw;
2969 	vsi->id = 0;
2970 	vsi->num_vlans = 0;
2971 	vsi->back = pf;
2972 
2973 	/* Get memory for the station queues */
2974 	if (!(vsi->queues =
2975 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2976 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2977 		device_printf(dev, "Unable to allocate queue memory\n");
2978 		error = ENOMEM;
2979 		goto early;
2980 	}
2981 
2982 	for (int i = 0; i < vsi->num_queues; i++) {
2983 		que = &vsi->queues[i];
2984 		que->num_desc = ixl_ringsz;
2985 		que->me = i;
2986 		que->vsi = vsi;
2987 		/* mark the queue as active */
2988 		vsi->active_queues |= (u64)1 << que->me;
2989 		txr = &que->txr;
2990 		txr->que = que;
2991 		txr->tail = I40E_QTX_TAIL(que->me);
2992 
2993 		/* Initialize the TX lock */
2994 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2995 		    device_get_nameunit(dev), que->me);
2996 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2997 		/* Create the TX descriptor ring */
2998 		tsize = roundup2((que->num_desc *
2999 		    sizeof(struct i40e_tx_desc)) +
3000 		    sizeof(u32), DBA_ALIGN);
3001 		if (i40e_allocate_dma_mem(&pf->hw,
3002 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3003 			device_printf(dev,
3004 			    "Unable to allocate TX Descriptor memory\n");
3005 			error = ENOMEM;
3006 			goto fail;
3007 		}
3008 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3009 		bzero((void *)txr->base, tsize);
3010        		/* Now allocate transmit soft structs for the ring */
3011        		if (ixl_allocate_tx_data(que)) {
3012 			device_printf(dev,
3013 			    "Critical Failure setting up TX structures\n");
3014 			error = ENOMEM;
3015 			goto fail;
3016        		}
3017 		/* Allocate a buf ring */
3018 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3019 		    M_WAITOK, &txr->mtx);
3020 		if (txr->br == NULL) {
3021 			device_printf(dev,
3022 			    "Critical Failure setting up TX buf ring\n");
3023 			error = ENOMEM;
3024 			goto fail;
3025        		}
3026 
3027 		/*
3028 		 * Next the RX queues...
3029 		 */
3030 		rsize = roundup2(que->num_desc *
3031 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3032 		rxr = &que->rxr;
3033 		rxr->que = que;
3034 		rxr->tail = I40E_QRX_TAIL(que->me);
3035 
3036 		/* Initialize the RX side lock */
3037 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3038 		    device_get_nameunit(dev), que->me);
3039 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3040 
3041 		if (i40e_allocate_dma_mem(&pf->hw,
3042 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3043 			device_printf(dev,
3044 			    "Unable to allocate RX Descriptor memory\n");
3045 			error = ENOMEM;
3046 			goto fail;
3047 		}
3048 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3049 		bzero((void *)rxr->base, rsize);
3050 
3051         	/* Allocate receive soft structs for the ring*/
3052 		if (ixl_allocate_rx_data(que)) {
3053 			device_printf(dev,
3054 			    "Critical Failure setting up receive structs\n");
3055 			error = ENOMEM;
3056 			goto fail;
3057 		}
3058 	}
3059 
3060 	return (0);
3061 
3062 fail:
3063 	for (int i = 0; i < vsi->num_queues; i++) {
3064 		que = &vsi->queues[i];
3065 		rxr = &que->rxr;
3066 		txr = &que->txr;
3067 		if (rxr->base)
3068 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3069 		if (txr->base)
3070 			i40e_free_dma_mem(&pf->hw, &txr->dma);
3071 	}
3072 
3073 early:
3074 	return (error);
3075 }
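
/*
 * Editor's sketch (not driver code): the TX ring allocation above sizes
 * the descriptor array plus one u32 for the head-writeback word that
 * ixl_initialize_vsi() programs, rounded up to DBA_ALIGN.  Assuming
 * (editor's assumption) 16-byte TX descriptors and a 128-byte
 * DBA_ALIGN:
 */
#if 0	/* illustration only */
#include <stddef.h>
#include <stdint.h>

static size_t
demo_tx_ring_bytes(size_t num_desc)
{
	size_t align = 128;				/* assumed DBA_ALIGN */
	size_t sz = num_desc * 16 + sizeof(uint32_t);	/* descs + head WB */

	return ((sz + align - 1) & ~(align - 1));	/* roundup2() */
}
#endif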
3076 
3077 /*
3078 ** Provide an update to the queue RX
3079 ** interrupt moderation value.
3080 */
3081 static void
3082 ixl_set_queue_rx_itr(struct ixl_queue *que)
3083 {
3084 	struct ixl_vsi	*vsi = que->vsi;
3085 	struct i40e_hw	*hw = vsi->hw;
3086 	struct rx_ring	*rxr = &que->rxr;
3087 	u16		rx_itr;
3088 	u16		rx_latency = 0;
3089 	int		rx_bytes;
3090 
3091 
3092 	/* Idle, do nothing */
3093 	if (rxr->bytes == 0)
3094 		return;
3095 
3096 	if (ixl_dynamic_rx_itr) {
3097 		rx_bytes = rxr->bytes/rxr->itr;
3098 		rx_itr = rxr->itr;
3099 
3100 		/* Adjust latency range */
3101 		switch (rxr->latency) {
3102 		case IXL_LOW_LATENCY:
3103 			if (rx_bytes > 10) {
3104 				rx_latency = IXL_AVE_LATENCY;
3105 				rx_itr = IXL_ITR_20K;
3106 			}
3107 			break;
3108 		case IXL_AVE_LATENCY:
3109 			if (rx_bytes > 20) {
3110 				rx_latency = IXL_BULK_LATENCY;
3111 				rx_itr = IXL_ITR_8K;
3112 			} else if (rx_bytes <= 10) {
3113 				rx_latency = IXL_LOW_LATENCY;
3114 				rx_itr = IXL_ITR_100K;
3115 			}
3116 			break;
3117 		case IXL_BULK_LATENCY:
3118 			if (rx_bytes <= 20) {
3119 				rx_latency = IXL_AVE_LATENCY;
3120 				rx_itr = IXL_ITR_20K;
3121 			}
3122 			break;
3123 		}
3124 
3125 		rxr->latency = rx_latency;
3126 
3127 		if (rx_itr != rxr->itr) {
3128 			/* do an exponential smoothing */
3129 			rx_itr = (10 * rx_itr * rxr->itr) /
3130 			    ((9 * rx_itr) + rxr->itr);
3131 			rxr->itr = rx_itr & IXL_MAX_ITR;
3132 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3133 			    que->me), rxr->itr);
3134 		}
3135 	} else { /* We may have toggled to non-dynamic */
3136 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3137 			vsi->rx_itr_setting = ixl_rx_itr;
3138 		/* Update the hardware if needed */
3139 		if (rxr->itr != vsi->rx_itr_setting) {
3140 			rxr->itr = vsi->rx_itr_setting;
3141 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3142 			    que->me), rxr->itr);
3143 		}
3144 	}
3145 	rxr->bytes = 0;
3146 	rxr->packets = 0;
3147 	return;
3148 }
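
/*
 * Editor's sketch (not driver code): the smoothing step above, factored
 * out.  Each call blends the new target with the current value so the
 * programmed moderation rate adapts gradually instead of jumping; the
 * TX path below uses the identical formula.
 */
#if 0	/* illustration only */
#include <stdint.h>

static uint16_t
demo_smooth_itr(uint16_t target, uint16_t cur)
{
	/* weighted blend: 10*target*cur / (9*target + cur) */
	return ((10u * target * cur) / ((9u * target) + cur));
}
#endif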
3149 
3150 
3151 /*
3152 ** Provide an update to the queue TX
3153 ** interrupt moderation value.
3154 */
3155 static void
3156 ixl_set_queue_tx_itr(struct ixl_queue *que)
3157 {
3158 	struct ixl_vsi	*vsi = que->vsi;
3159 	struct i40e_hw	*hw = vsi->hw;
3160 	struct tx_ring	*txr = &que->txr;
3161 	u16		tx_itr;
3162 	u16		tx_latency = 0;
3163 	int		tx_bytes;
3164 
3165 
3166 	/* Idle, do nothing */
3167 	if (txr->bytes == 0)
3168 		return;
3169 
3170 	if (ixl_dynamic_tx_itr) {
3171 		tx_bytes = txr->bytes/txr->itr;
3172 		tx_itr = txr->itr;
3173 
3174 		switch (txr->latency) {
3175 		case IXL_LOW_LATENCY:
3176 			if (tx_bytes > 10) {
3177 				tx_latency = IXL_AVE_LATENCY;
3178 				tx_itr = IXL_ITR_20K;
3179 			}
3180 			break;
3181 		case IXL_AVE_LATENCY:
3182 			if (tx_bytes > 20) {
3183 				tx_latency = IXL_BULK_LATENCY;
3184 				tx_itr = IXL_ITR_8K;
3185 			} else if (tx_bytes <= 10) {
3186 				tx_latency = IXL_LOW_LATENCY;
3187 				tx_itr = IXL_ITR_100K;
3188 			}
3189 			break;
3190 		case IXL_BULK_LATENCY:
3191 			if (tx_bytes <= 20) {
3192 				tx_latency = IXL_AVE_LATENCY;
3193 				tx_itr = IXL_ITR_20K;
3194 			}
3195 			break;
3196 		}
3197 
3198 		txr->latency = tx_latency;
3199 
3200 		if (tx_itr != txr->itr) {
3201 			/* do an exponential smoothing */
3202 			tx_itr = (10 * tx_itr * txr->itr) /
3203 			    ((9 * tx_itr) + txr->itr);
3204 			txr->itr = tx_itr & IXL_MAX_ITR;
3205 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3206 			    que->me), txr->itr);
3207 		}
3208 
3209 	} else { /* We may have toggled to non-dynamic */
3210 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3211 			vsi->tx_itr_setting = ixl_tx_itr;
3212 		/* Update the hardware if needed */
3213 		if (txr->itr != vsi->tx_itr_setting) {
3214 			txr->itr = vsi->tx_itr_setting;
3215 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3216 			    que->me), txr->itr);
3217 		}
3218 	}
3219 	txr->bytes = 0;
3220 	txr->packets = 0;
3221 	return;
3222 }
3223 
3224 #define QUEUE_NAME_LEN 32
3225 
3226 static void
3227 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3228     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3229 {
3230 	struct sysctl_oid *tree;
3231 	struct sysctl_oid_list *child;
3232 	struct sysctl_oid_list *vsi_list;
3233 
3234 	tree = device_get_sysctl_tree(pf->dev);
3235 	child = SYSCTL_CHILDREN(tree);
3236 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3237 				   CTLFLAG_RD, NULL, "VSI Number");
3238 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3239 
3240 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3241 }
3242 
3243 static void
3244 ixl_add_hw_stats(struct ixl_pf *pf)
3245 {
3246 	device_t dev = pf->dev;
3247 	struct ixl_vsi *vsi = &pf->vsi;
3248 	struct ixl_queue *queues = vsi->queues;
3249 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3250 
3251 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3252 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3253 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3254 	struct sysctl_oid_list *vsi_list;
3255 
3256 	struct sysctl_oid *queue_node;
3257 	struct sysctl_oid_list *queue_list;
3258 
3259 	struct tx_ring *txr;
3260 	struct rx_ring *rxr;
3261 	char queue_namebuf[QUEUE_NAME_LEN];
3262 
3263 	/* Driver statistics */
3264 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3265 			CTLFLAG_RD, &pf->watchdog_events,
3266 			"Watchdog timeouts");
3267 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3268 			CTLFLAG_RD, &pf->admin_irq,
3269 			"Admin Queue IRQ Handled");
3270 
3271 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3272 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3273 
3274 	/* Queue statistics */
3275 	for (int q = 0; q < vsi->num_queues; q++) {
3276 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3277 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3278 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3279 		queue_list = SYSCTL_CHILDREN(queue_node);
3280 
3281 		txr = &(queues[q].txr);
3282 		rxr = &(queues[q].rxr);
3283 
3284 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3285 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3286 				"m_defrag() failed");
3287 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3288 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3289 				"Driver dropped packets");
3290 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3291 				CTLFLAG_RD, &(queues[q].irqs),
3292 				"irqs on this queue");
3293 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3294 				CTLFLAG_RD, &(queues[q].tso),
3295 				"TSO");
3296 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3297 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3298 				"Driver tx dma failure in xmit");
3299 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3300 				CTLFLAG_RD, &(txr->no_desc),
3301 				"Queue No Descriptor Available");
3302 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3303 				CTLFLAG_RD, &(txr->total_packets),
3304 				"Queue Packets Transmitted");
3305 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3306 				CTLFLAG_RD, &(txr->tx_bytes),
3307 				"Queue Bytes Transmitted");
3308 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3309 				CTLFLAG_RD, &(rxr->rx_packets),
3310 				"Queue Packets Received");
3311 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3312 				CTLFLAG_RD, &(rxr->rx_bytes),
3313 				"Queue Bytes Received");
3314 	}
3315 
3316 	/* MAC stats */
3317 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3318 }
3319 
3320 static void
3321 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3322 	struct sysctl_oid_list *child,
3323 	struct i40e_eth_stats *eth_stats)
3324 {
3325 	struct ixl_sysctl_info ctls[] =
3326 	{
3327 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3328 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3329 			"Unicast Packets Received"},
3330 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3331 			"Multicast Packets Received"},
3332 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3333 			"Broadcast Packets Received"},
3334 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3335 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3336 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3337 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3338 			"Multicast Packets Transmitted"},
3339 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3340 			"Broadcast Packets Transmitted"},
3341 		/* end */
3342 		{0,0,0}
3343 	};
3344 
3345 	struct ixl_sysctl_info *entry = ctls;
3346 	while (entry->stat != 0)
3347 	{
3348 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3349 				CTLFLAG_RD, entry->stat,
3350 				entry->description);
3351 		entry++;
3352 	}
3353 }
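
/*
 * Editor's note (sketch): the ctls[] walks here and below rely on a
 * sentinel entry whose stat pointer is NULL to end the loop; the bare
 * pattern:
 */
#if 0	/* illustration only */
#include <stddef.h>

struct demo_ctl {
	void		*stat;
	const char	*name;
};

static void
demo_walk(const struct demo_ctl *entry)
{
	for (; entry->stat != NULL; entry++)
		;	/* SYSCTL_ADD_UQUAD(..., entry->name, ...) */
}
#endif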
3354 
3355 static void
3356 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3357 	struct sysctl_oid_list *child,
3358 	struct i40e_hw_port_stats *stats)
3359 {
3360 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3361 				    CTLFLAG_RD, NULL, "Mac Statistics");
3362 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3363 
3364 	struct i40e_eth_stats *eth_stats = &stats->eth;
3365 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3366 
3367 	struct ixl_sysctl_info ctls[] =
3368 	{
3369 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3370 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3371 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3372 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3373 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3374 		/* Packet Reception Stats */
3375 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3376 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3377 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3378 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3379 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3380 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3381 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3382 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3383 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3384 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3385 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3386 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3387 		/* Packet Transmission Stats */
3388 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3389 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3390 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3391 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3392 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3393 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3394 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3395 		/* Flow control */
3396 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3397 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3398 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3399 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3400 		/* End */
3401 		{0,0,0}
3402 	};
3403 
3404 	struct ixl_sysctl_info *entry = ctls;
3405 	while (entry->stat != 0)
3406 	{
3407 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3408 				CTLFLAG_RD, entry->stat,
3409 				entry->description);
3410 		entry++;
3411 	}
3412 }
3413 
3414 
3415 /*
3416 ** ixl_config_rss - setup RSS
3417 **  - note this is done for the single vsi
3418 */
3419 static void ixl_config_rss(struct ixl_vsi *vsi)
3420 {
3421 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3422 	struct i40e_hw	*hw = vsi->hw;
3423 	u32		lut = 0;
3424 	u64		set_hena = 0, hena;
3425 	int		i, j, que_id;
3426 #ifdef RSS
3427 	u32		rss_hash_config;
3428 	u32		rss_seed[IXL_KEYSZ];
3429 #else
3430 	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3431 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3432 			    0x35897377, 0x328b25e1, 0x4fa98922,
3433 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3434 #endif
3435 
3436 #ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
3439 #endif
3440 
3441 	/* Fill out hash function seed */
3442 	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
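	/*
	** The seed fills IXL_KEYSZ (10) 32-bit HKEY registers,
	** i.e. a 40-byte (320-bit) RSS hash key.
	*/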
3444 
3445 	/* Enable PCTYPES for RSS: */
3446 #ifdef RSS
3447 	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3462 #else
3463 	set_hena =
3464 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3465 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3466 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3467 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3468 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3469 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3470 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3471 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3472 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3473 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3474 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3475 #endif
3476 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3477 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3478 	hena |= set_hena;
3479 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3480 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
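	/*
	** HENA is one 64-bit PCTYPE enable mask split across two 32-bit
	** registers: PCTYPE n maps to bit (n % 32) of HENA(n / 32), so,
	** for example, an enum value of 33 lands in bit 1 of
	** I40E_PFQF_HENA(1).
	*/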
3481 
	/* Populate the LUT with the max number of queues, in round-robin fashion */
3483 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3484 		if (j == vsi->num_queues)
3485 			j = 0;
3486 #ifdef RSS
3487 		/*
3488 		 * Fetch the RSS bucket id for the given indirection entry.
3489 		 * Cap it at the number of configured buckets (which is
3490 		 * num_queues.)
3491 		 */
3492 		que_id = rss_get_indirection_to_bucket(i);
3493 		que_id = que_id % vsi->num_queues;
3494 #else
3495 		que_id = j;
3496 #endif
3497 		/* lut = 4-byte sliding window of 4 lut entries */
3498 		lut = (lut << 8) | (que_id &
3499 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
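		/*
		** Worked example (illustrative, assuming >= 4 queues and
		** byte-wide entries): on i = 0..3 the shifts above pack
		** (0 << 24) | (1 << 16) | (2 << 8) | 3 = 0x00010203,
		** which becomes the first register write below.
		*/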
3500 		/* On i = 3, we have 4 entries in lut; write to the register */
3501 		if ((i & 3) == 3)
3502 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3503 	}
3504 	ixl_flush(hw);
3505 }
3506 
3507 
3508 /*
3509 ** This routine is run via an vlan config EVENT,
3510 ** it enables us to use the HW Filter table since
3511 ** we can get the vlan id. This just creates the
3512 ** entry in the soft version of the VFTA, init will
3513 ** repopulate the real table.
3514 */
3515 static void
3516 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3517 {
3518 	struct ixl_vsi	*vsi = ifp->if_softc;
3519 	struct i40e_hw	*hw = vsi->hw;
3520 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3521 
3522 	if (ifp->if_softc !=  arg)   /* Not our event */
3523 		return;
3524 
3525 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3526 		return;
3527 
3528 	IXL_PF_LOCK(pf);
3529 	++vsi->num_vlans;
3530 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3531 	IXL_PF_UNLOCK(pf);
3532 }
3533 
3534 /*
3535 ** This routine is run via an vlan
3536 ** unconfig EVENT, remove our entry
3537 ** in the soft vfta.
3538 */
3539 static void
3540 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3541 {
3542 	struct ixl_vsi	*vsi = ifp->if_softc;
3543 	struct i40e_hw	*hw = vsi->hw;
3544 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3545 
3546 	if (ifp->if_softc !=  arg)
3547 		return;
3548 
3549 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3550 		return;
3551 
3552 	IXL_PF_LOCK(pf);
3553 	--vsi->num_vlans;
3554 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3555 	IXL_PF_UNLOCK(pf);
3556 }
3557 
3558 /*
3559 ** This routine updates vlan filters, called by init
3560 ** it scans the filter table and then updates the hw
3561 ** after a soft reset.
3562 */
3563 static void
3564 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3565 {
3566 	struct ixl_mac_filter	*f;
3567 	int			cnt = 0, flags;
3568 
3569 	if (vsi->num_vlans == 0)
3570 		return;
3571 	/*
3572 	** Scan the filter list for vlan entries,
3573 	** mark them for addition and then call
3574 	** for the AQ update.
3575 	*/
3576 	SLIST_FOREACH(f, &vsi->ftl, next) {
3577 		if (f->flags & IXL_FILTER_VLAN) {
3578 			f->flags |=
3579 			    (IXL_FILTER_ADD |
3580 			    IXL_FILTER_USED);
3581 			cnt++;
3582 		}
3583 	}
3584 	if (cnt == 0) {
3585 		printf("setup vlan: no filters found!\n");
3586 		return;
3587 	}
3588 	flags = IXL_FILTER_VLAN;
3589 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3590 	ixl_add_hw_filters(vsi, flags, cnt);
3591 	return;
3592 }
3593 
3594 /*
3595 ** Initialize filter list and add filters that the hardware
3596 ** needs to know about.
3597 */
3598 static void
3599 ixl_init_filters(struct ixl_vsi *vsi)
3600 {
3601 	/* Add broadcast address */
3602 	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3603 }
3604 
3605 /*
3606 ** This routine adds mulicast filters
3607 */
3608 static void
3609 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3610 {
3611 	struct ixl_mac_filter *f;
3612 
	/* Does one already exist? */
3614 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3615 	if (f != NULL)
3616 		return;
3617 
3618 	f = ixl_get_filter(vsi);
3619 	if (f == NULL) {
3620 		printf("WARNING: no filter available!!\n");
3621 		return;
3622 	}
3623 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3624 	f->vlan = IXL_VLAN_ANY;
3625 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3626 	    | IXL_FILTER_MC);
3627 
3628 	return;
3629 }
3630 
3631 static void
3632 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3633 {
3634 
3635 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3636 }
3637 
3638 /*
3639 ** This routine adds macvlan filters
3640 */
3641 static void
3642 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3643 {
3644 	struct ixl_mac_filter	*f, *tmp;
3645 	struct ixl_pf		*pf;
3646 	device_t		dev;
3647 
3648 	DEBUGOUT("ixl_add_filter: begin");
3649 
3650 	pf = vsi->back;
3651 	dev = pf->dev;
3652 
	/* Does one already exist? */
3654 	f = ixl_find_filter(vsi, macaddr, vlan);
3655 	if (f != NULL)
3656 		return;
3657 	/*
3658 	** Is this the first vlan being registered, if so we
3659 	** need to remove the ANY filter that indicates we are
3660 	** not in a vlan, and replace that with a 0 filter.
3661 	*/
3662 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3663 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3664 		if (tmp != NULL) {
3665 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3666 			ixl_add_filter(vsi, macaddr, 0);
3667 		}
3668 	}
3669 
3670 	f = ixl_get_filter(vsi);
3671 	if (f == NULL) {
3672 		device_printf(dev, "WARNING: no filter available!!\n");
3673 		return;
3674 	}
3675 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3676 	f->vlan = vlan;
3677 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3678 	if (f->vlan != IXL_VLAN_ANY)
3679 		f->flags |= IXL_FILTER_VLAN;
3680 	else
3681 		vsi->num_macs++;
3682 
3683 	ixl_add_hw_filters(vsi, f->flags, 1);
3684 	return;
3685 }
3686 
3687 static void
3688 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3689 {
3690 	struct ixl_mac_filter *f;
3691 
3692 	f = ixl_find_filter(vsi, macaddr, vlan);
3693 	if (f == NULL)
3694 		return;
3695 
3696 	f->flags |= IXL_FILTER_DEL;
3697 	ixl_del_hw_filters(vsi, 1);
3698 	vsi->num_macs--;
3699 
3700 	/* Check if this is the last vlan removal */
3701 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3702 		/* Switch back to a non-vlan filter */
3703 		ixl_del_filter(vsi, macaddr, 0);
3704 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3705 	}
3706 	return;
3707 }
3708 
3709 /*
3710 ** Find the filter with both matching mac addr and vlan id
3711 */
3712 static struct ixl_mac_filter *
3713 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3714 {
3715 	struct ixl_mac_filter	*f;
3716 	bool			match = FALSE;
3717 
3718 	SLIST_FOREACH(f, &vsi->ftl, next) {
3719 		if (!cmp_etheraddr(f->macaddr, macaddr))
3720 			continue;
3721 		if (f->vlan == vlan) {
3722 			match = TRUE;
3723 			break;
3724 		}
3725 	}
3726 
3727 	if (!match)
3728 		f = NULL;
3729 	return (f);
3730 }
3731 
3732 /*
3733 ** This routine takes additions to the vsi filter
3734 ** table and creates an Admin Queue call to create
3735 ** the filters in the hardware.
3736 */
3737 static void
3738 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3739 {
3740 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3741 	struct ixl_mac_filter	*f;
3742 	struct ixl_pf		*pf;
3743 	struct i40e_hw		*hw;
3744 	device_t		dev;
3745 	int			err, j = 0;
3746 
3747 	pf = vsi->back;
3748 	dev = pf->dev;
3749 	hw = &pf->hw;
3750 	IXL_PF_LOCK_ASSERT(pf);
3751 
3752 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3753 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3754 	if (a == NULL) {
3755 		device_printf(dev, "add_hw_filters failed to get memory\n");
3756 		return;
3757 	}
3758 
3759 	/*
3760 	** Scan the filter list, each time we find one
3761 	** we add it to the admin queue array and turn off
3762 	** the add bit.
3763 	*/
3764 	SLIST_FOREACH(f, &vsi->ftl, next) {
3765 		if (f->flags == flags) {
3766 			b = &a[j]; // a pox on fvl long names :)
3767 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3768 			if (f->vlan == IXL_VLAN_ANY) {
3769 				b->vlan_tag = 0;
3770 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3771 			} else {
3772 				b->vlan_tag = f->vlan;
3773 				b->flags = 0;
3774 			}
3775 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3776 			f->flags &= ~IXL_FILTER_ADD;
3777 			j++;
3778 		}
3779 		if (j == cnt)
3780 			break;
3781 	}
3782 	if (j > 0) {
3783 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3784 		if (err)
3785 			device_printf(dev, "aq_add_macvlan err %d, "
3786 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3787 		else
3788 			vsi->hw_filters_add += j;
3789 	}
3790 	free(a, M_DEVBUF);
3791 	return;
3792 }
3793 
3794 /*
3795 ** This routine takes removals in the vsi filter
3796 ** table and creates an Admin Queue call to delete
3797 ** the filters in the hardware.
3798 */
3799 static void
3800 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3801 {
3802 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3803 	struct ixl_pf		*pf;
3804 	struct i40e_hw		*hw;
3805 	device_t		dev;
3806 	struct ixl_mac_filter	*f, *f_temp;
3807 	int			err, j = 0;
3808 
3809 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3810 
3811 	pf = vsi->back;
3812 	hw = &pf->hw;
3813 	dev = pf->dev;
3814 
3815 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3816 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3817 	if (d == NULL) {
3818 		printf("del hw filter failed to get memory\n");
3819 		return;
3820 	}
3821 
3822 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3823 		if (f->flags & IXL_FILTER_DEL) {
3824 			e = &d[j]; // a pox on fvl long names :)
3825 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3826 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3827 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3828 			/* delete entry from vsi list */
3829 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3830 			free(f, M_DEVBUF);
3831 			j++;
3832 		}
3833 		if (j == cnt)
3834 			break;
3835 	}
3836 	if (j > 0) {
3837 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		/* NOTE: the AQ command returns ENOENT every time, yet the
		   removal still works, so we ignore that specific error. */
3840 		// TODO: Does this still occur on current firmwares?
3841 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3842 			int sc = 0;
3843 			for (int i = 0; i < j; i++)
3844 				sc += (!d[i].error_code);
3845 			vsi->hw_filters_del += sc;
3846 			device_printf(dev,
3847 			    "Failed to remove %d/%d filters, aq error %d\n",
3848 			    j - sc, j, hw->aq.asq_last_status);
3849 		} else
3850 			vsi->hw_filters_del += j;
3851 	}
3852 	free(d, M_DEVBUF);
3853 
3854 	DEBUGOUT("ixl_del_hw_filters: end\n");
3855 	return;
3856 }
3857 
3858 static int
3859 ixl_enable_rings(struct ixl_vsi *vsi)
3860 {
3861 	struct ixl_pf	*pf = vsi->back;
3862 	struct i40e_hw	*hw = &pf->hw;
3863 	int		index, error;
3864 	u32		reg;
3865 
3866 	error = 0;
3867 	for (int i = 0; i < vsi->num_queues; i++) {
3868 		index = vsi->first_queue + i;
3869 		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3870 
3871 		reg = rd32(hw, I40E_QTX_ENA(index));
3872 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3873 		    I40E_QTX_ENA_QENA_STAT_MASK;
3874 		wr32(hw, I40E_QTX_ENA(index), reg);
3875 		/* Verify the enable took */
3876 		for (int j = 0; j < 10; j++) {
3877 			reg = rd32(hw, I40E_QTX_ENA(index));
3878 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3879 				break;
3880 			i40e_msec_delay(10);
3881 		}
3882 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3883 			device_printf(pf->dev, "TX queue %d disabled!\n",
3884 			    index);
3885 			error = ETIMEDOUT;
3886 		}
3887 
3888 		reg = rd32(hw, I40E_QRX_ENA(index));
3889 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3890 		    I40E_QRX_ENA_QENA_STAT_MASK;
3891 		wr32(hw, I40E_QRX_ENA(index), reg);
3892 		/* Verify the enable took */
3893 		for (int j = 0; j < 10; j++) {
3894 			reg = rd32(hw, I40E_QRX_ENA(index));
3895 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3896 				break;
3897 			i40e_msec_delay(10);
3898 		}
3899 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3900 			device_printf(pf->dev, "RX queue %d disabled!\n",
3901 			    index);
3902 			error = ETIMEDOUT;
3903 		}
3904 	}
3905 
3906 	return (error);
3907 }
3908 
3909 static int
3910 ixl_disable_rings(struct ixl_vsi *vsi)
3911 {
3912 	struct ixl_pf	*pf = vsi->back;
3913 	struct i40e_hw	*hw = &pf->hw;
3914 	int		index, error;
3915 	u32		reg;
3916 
3917 	error = 0;
3918 	for (int i = 0; i < vsi->num_queues; i++) {
3919 		index = vsi->first_queue + i;
3920 
3921 		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3922 		i40e_usec_delay(500);
3923 
3924 		reg = rd32(hw, I40E_QTX_ENA(index));
3925 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3926 		wr32(hw, I40E_QTX_ENA(index), reg);
3927 		/* Verify the disable took */
3928 		for (int j = 0; j < 10; j++) {
3929 			reg = rd32(hw, I40E_QTX_ENA(index));
3930 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3931 				break;
3932 			i40e_msec_delay(10);
3933 		}
3934 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3935 			device_printf(pf->dev, "TX queue %d still enabled!\n",
3936 			    index);
3937 			error = ETIMEDOUT;
3938 		}
3939 
3940 		reg = rd32(hw, I40E_QRX_ENA(index));
3941 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3942 		wr32(hw, I40E_QRX_ENA(index), reg);
3943 		/* Verify the disable took */
3944 		for (int j = 0; j < 10; j++) {
3945 			reg = rd32(hw, I40E_QRX_ENA(index));
3946 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3947 				break;
3948 			i40e_msec_delay(10);
3949 		}
3950 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3951 			device_printf(pf->dev, "RX queue %d still enabled!\n",
3952 			    index);
3953 			error = ETIMEDOUT;
3954 		}
3955 	}
3956 
3957 	return (error);
3958 }
3959 
3960 /**
3961  * ixl_handle_mdd_event
3962  *
 * Called from the interrupt handler to identify possibly malicious VFs
 * (also detects events from the PF itself).
3965  **/
3966 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3967 {
3968 	struct i40e_hw *hw = &pf->hw;
3969 	device_t dev = pf->dev;
3970 	bool mdd_detected = false;
3971 	bool pf_mdd_detected = false;
3972 	u32 reg;
3973 
3974 	/* find what triggered the MDD event */
3975 	reg = rd32(hw, I40E_GL_MDET_TX);
3976 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3977 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3978 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3979 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3980 				I40E_GL_MDET_TX_EVENT_SHIFT;
3981 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3982 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3983 		device_printf(dev,
3984 			 "Malicious Driver Detection event 0x%02x"
3985 			 " on TX queue %d pf number 0x%02x\n",
3986 			 event, queue, pf_num);
3987 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3988 		mdd_detected = true;
3989 	}
3990 	reg = rd32(hw, I40E_GL_MDET_RX);
3991 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3992 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3993 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3994 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3995 				I40E_GL_MDET_RX_EVENT_SHIFT;
3996 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3997 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3998 		device_printf(dev,
3999 			 "Malicious Driver Detection event 0x%02x"
4000 			 " on RX queue %d of function 0x%02x\n",
4001 			 event, queue, func);
4002 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4003 		mdd_detected = true;
4004 	}
4005 
4006 	if (mdd_detected) {
4007 		reg = rd32(hw, I40E_PF_MDET_TX);
4008 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4009 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4010 			device_printf(dev,
4011 				 "MDD TX event is for this function 0x%08x",
4012 				 reg);
4013 			pf_mdd_detected = true;
4014 		}
4015 		reg = rd32(hw, I40E_PF_MDET_RX);
4016 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4017 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4018 			device_printf(dev,
4019 				 "MDD RX event is for this function 0x%08x",
4020 				 reg);
4021 			pf_mdd_detected = true;
4022 		}
4023 	}
4024 
4025 	/* re-enable mdd interrupt cause */
4026 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4027 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4028 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4029 	ixl_flush(hw);
4030 }
4031 
4032 static void
4033 ixl_enable_intr(struct ixl_vsi *vsi)
4034 {
4035 	struct i40e_hw		*hw = vsi->hw;
4036 	struct ixl_queue	*que = vsi->queues;
4037 
4038 	if (ixl_enable_msix) {
4039 		ixl_enable_adminq(hw);
4040 		for (int i = 0; i < vsi->num_queues; i++, que++)
4041 			ixl_enable_queue(hw, que->me);
4042 	} else
4043 		ixl_enable_legacy(hw);
4044 }
4045 
4046 static void
4047 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4048 {
4049 	struct i40e_hw		*hw = vsi->hw;
4050 	struct ixl_queue	*que = vsi->queues;
4051 
4052 	for (int i = 0; i < vsi->num_queues; i++, que++)
4053 		ixl_disable_queue(hw, que->me);
4054 }
4055 
4056 static void
4057 ixl_disable_intr(struct ixl_vsi *vsi)
4058 {
4059 	struct i40e_hw		*hw = vsi->hw;
4060 
4061 	if (ixl_enable_msix)
4062 		ixl_disable_adminq(hw);
4063 	else
4064 		ixl_disable_legacy(hw);
4065 }
4066 
4067 static void
4068 ixl_enable_adminq(struct i40e_hw *hw)
4069 {
4070 	u32		reg;
4071 
4072 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4073 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4074 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4075 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4076 	ixl_flush(hw);
4077 	return;
4078 }
4079 
4080 static void
4081 ixl_disable_adminq(struct i40e_hw *hw)
4082 {
4083 	u32		reg;
4084 
4085 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4086 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4087 
4088 	return;
4089 }
4090 
4091 static void
4092 ixl_enable_queue(struct i40e_hw *hw, int id)
4093 {
4094 	u32		reg;
4095 
4096 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4097 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4098 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4099 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4100 }
4101 
4102 static void
4103 ixl_disable_queue(struct i40e_hw *hw, int id)
4104 {
4105 	u32		reg;
4106 
4107 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4108 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4109 
4110 	return;
4111 }
4112 
4113 static void
4114 ixl_enable_legacy(struct i40e_hw *hw)
4115 {
4116 	u32		reg;
4117 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4118 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4119 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4120 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4121 }
4122 
4123 static void
4124 ixl_disable_legacy(struct i40e_hw *hw)
4125 {
4126 	u32		reg;
4127 
4128 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4129 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4130 
4131 	return;
4132 }
4133 
4134 static void
4135 ixl_update_stats_counters(struct ixl_pf *pf)
4136 {
4137 	struct i40e_hw	*hw = &pf->hw;
4138 	struct ixl_vsi	*vsi = &pf->vsi;
4139 	struct ixl_vf	*vf;
4140 
4141 	struct i40e_hw_port_stats *nsd = &pf->stats;
4142 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4143 
4144 	/* Update hw stats */
4145 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4146 			   pf->stat_offsets_loaded,
4147 			   &osd->crc_errors, &nsd->crc_errors);
4148 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4149 			   pf->stat_offsets_loaded,
4150 			   &osd->illegal_bytes, &nsd->illegal_bytes);
4151 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4152 			   I40E_GLPRT_GORCL(hw->port),
4153 			   pf->stat_offsets_loaded,
4154 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4155 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4156 			   I40E_GLPRT_GOTCL(hw->port),
4157 			   pf->stat_offsets_loaded,
4158 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4159 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4160 			   pf->stat_offsets_loaded,
4161 			   &osd->eth.rx_discards,
4162 			   &nsd->eth.rx_discards);
4163 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4164 			   I40E_GLPRT_UPRCL(hw->port),
4165 			   pf->stat_offsets_loaded,
4166 			   &osd->eth.rx_unicast,
4167 			   &nsd->eth.rx_unicast);
4168 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4169 			   I40E_GLPRT_UPTCL(hw->port),
4170 			   pf->stat_offsets_loaded,
4171 			   &osd->eth.tx_unicast,
4172 			   &nsd->eth.tx_unicast);
4173 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4174 			   I40E_GLPRT_MPRCL(hw->port),
4175 			   pf->stat_offsets_loaded,
4176 			   &osd->eth.rx_multicast,
4177 			   &nsd->eth.rx_multicast);
4178 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4179 			   I40E_GLPRT_MPTCL(hw->port),
4180 			   pf->stat_offsets_loaded,
4181 			   &osd->eth.tx_multicast,
4182 			   &nsd->eth.tx_multicast);
4183 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4184 			   I40E_GLPRT_BPRCL(hw->port),
4185 			   pf->stat_offsets_loaded,
4186 			   &osd->eth.rx_broadcast,
4187 			   &nsd->eth.rx_broadcast);
4188 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4189 			   I40E_GLPRT_BPTCL(hw->port),
4190 			   pf->stat_offsets_loaded,
4191 			   &osd->eth.tx_broadcast,
4192 			   &nsd->eth.tx_broadcast);
4193 
4194 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4195 			   pf->stat_offsets_loaded,
4196 			   &osd->tx_dropped_link_down,
4197 			   &nsd->tx_dropped_link_down);
4198 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4199 			   pf->stat_offsets_loaded,
4200 			   &osd->mac_local_faults,
4201 			   &nsd->mac_local_faults);
4202 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4203 			   pf->stat_offsets_loaded,
4204 			   &osd->mac_remote_faults,
4205 			   &nsd->mac_remote_faults);
4206 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4207 			   pf->stat_offsets_loaded,
4208 			   &osd->rx_length_errors,
4209 			   &nsd->rx_length_errors);
4210 
4211 	/* Flow control (LFC) stats */
4212 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4213 			   pf->stat_offsets_loaded,
4214 			   &osd->link_xon_rx, &nsd->link_xon_rx);
4215 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4216 			   pf->stat_offsets_loaded,
4217 			   &osd->link_xon_tx, &nsd->link_xon_tx);
4218 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4219 			   pf->stat_offsets_loaded,
4220 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4221 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4222 			   pf->stat_offsets_loaded,
4223 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4224 
4225 	/* Packet size stats rx */
4226 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4227 			   I40E_GLPRT_PRC64L(hw->port),
4228 			   pf->stat_offsets_loaded,
4229 			   &osd->rx_size_64, &nsd->rx_size_64);
4230 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4231 			   I40E_GLPRT_PRC127L(hw->port),
4232 			   pf->stat_offsets_loaded,
4233 			   &osd->rx_size_127, &nsd->rx_size_127);
4234 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4235 			   I40E_GLPRT_PRC255L(hw->port),
4236 			   pf->stat_offsets_loaded,
4237 			   &osd->rx_size_255, &nsd->rx_size_255);
4238 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4239 			   I40E_GLPRT_PRC511L(hw->port),
4240 			   pf->stat_offsets_loaded,
4241 			   &osd->rx_size_511, &nsd->rx_size_511);
4242 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4243 			   I40E_GLPRT_PRC1023L(hw->port),
4244 			   pf->stat_offsets_loaded,
4245 			   &osd->rx_size_1023, &nsd->rx_size_1023);
4246 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4247 			   I40E_GLPRT_PRC1522L(hw->port),
4248 			   pf->stat_offsets_loaded,
4249 			   &osd->rx_size_1522, &nsd->rx_size_1522);
4250 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4251 			   I40E_GLPRT_PRC9522L(hw->port),
4252 			   pf->stat_offsets_loaded,
4253 			   &osd->rx_size_big, &nsd->rx_size_big);
4254 
4255 	/* Packet size stats tx */
4256 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4257 			   I40E_GLPRT_PTC64L(hw->port),
4258 			   pf->stat_offsets_loaded,
4259 			   &osd->tx_size_64, &nsd->tx_size_64);
4260 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4261 			   I40E_GLPRT_PTC127L(hw->port),
4262 			   pf->stat_offsets_loaded,
4263 			   &osd->tx_size_127, &nsd->tx_size_127);
4264 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4265 			   I40E_GLPRT_PTC255L(hw->port),
4266 			   pf->stat_offsets_loaded,
4267 			   &osd->tx_size_255, &nsd->tx_size_255);
4268 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4269 			   I40E_GLPRT_PTC511L(hw->port),
4270 			   pf->stat_offsets_loaded,
4271 			   &osd->tx_size_511, &nsd->tx_size_511);
4272 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4273 			   I40E_GLPRT_PTC1023L(hw->port),
4274 			   pf->stat_offsets_loaded,
4275 			   &osd->tx_size_1023, &nsd->tx_size_1023);
4276 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4277 			   I40E_GLPRT_PTC1522L(hw->port),
4278 			   pf->stat_offsets_loaded,
4279 			   &osd->tx_size_1522, &nsd->tx_size_1522);
4280 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4281 			   I40E_GLPRT_PTC9522L(hw->port),
4282 			   pf->stat_offsets_loaded,
4283 			   &osd->tx_size_big, &nsd->tx_size_big);
4284 
4285 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4286 			   pf->stat_offsets_loaded,
4287 			   &osd->rx_undersize, &nsd->rx_undersize);
4288 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4289 			   pf->stat_offsets_loaded,
4290 			   &osd->rx_fragments, &nsd->rx_fragments);
4291 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4292 			   pf->stat_offsets_loaded,
4293 			   &osd->rx_oversize, &nsd->rx_oversize);
4294 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4295 			   pf->stat_offsets_loaded,
4296 			   &osd->rx_jabber, &nsd->rx_jabber);
4297 	pf->stat_offsets_loaded = true;
4298 	/* End hw stats */
4299 
4300 	/* Update vsi stats */
4301 	ixl_update_vsi_stats(vsi);
4302 
4303 	for (int i = 0; i < pf->num_vfs; i++) {
4304 		vf = &pf->vfs[i];
4305 		if (vf->vf_flags & VF_FLAG_ENABLED)
4306 			ixl_update_eth_stats(&pf->vfs[i].vsi);
4307 	}
4308 }
4309 
4310 /*
4311 ** Tasklet handler for MSIX Adminq interrupts
**  - run outside interrupt context since it might sleep
4313 */
4314 static void
4315 ixl_do_adminq(void *context, int pending)
4316 {
4317 	struct ixl_pf			*pf = context;
4318 	struct i40e_hw			*hw = &pf->hw;
4319 	struct ixl_vsi			*vsi = &pf->vsi;
4320 	struct i40e_arq_event_info	event;
4321 	i40e_status			ret;
4322 	u32				reg, loop = 0;
4323 	u16				opcode, result;
4324 
4325 	event.buf_len = IXL_AQ_BUF_SZ;
4326 	event.msg_buf = malloc(event.buf_len,
4327 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4328 	if (!event.msg_buf) {
4329 		printf("Unable to allocate adminq memory\n");
4330 		return;
4331 	}
4332 
4333 	IXL_PF_LOCK(pf);
4334 	/* clean and process any events */
4335 	do {
4336 		ret = i40e_clean_arq_element(hw, &event, &result);
4337 		if (ret)
4338 			break;
4339 		opcode = LE16_TO_CPU(event.desc.opcode);
4340 		switch (opcode) {
4341 		case i40e_aqc_opc_get_link_status:
4342 			ixl_link_event(pf, &event);
4343 			ixl_update_link_status(pf);
4344 			break;
4345 		case i40e_aqc_opc_send_msg_to_pf:
4346 #ifdef PCI_IOV
4347 			ixl_handle_vf_msg(pf, &event);
4348 #endif
4349 			break;
4350 		case i40e_aqc_opc_event_lan_overflow:
4351 			break;
4352 		default:
4353 #ifdef IXL_DEBUG
4354 			printf("AdminQ unknown event %x\n", opcode);
4355 #endif
4356 			break;
4357 		}
4358 
4359 	} while (result && (loop++ < IXL_ADM_LIMIT));
4360 
4361 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4362 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4363 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4364 	free(event.msg_buf, M_DEVBUF);
4365 
4366 	/*
4367 	 * If there are still messages to process, reschedule ourselves.
4368 	 * Otherwise, re-enable our interrupt and go to sleep.
4369 	 */
4370 	if (result > 0)
4371 		taskqueue_enqueue(pf->tq, &pf->adminq);
4372 	else
4373 		ixl_enable_intr(vsi);
4374 
4375 	IXL_PF_UNLOCK(pf);
4376 }
4377 
4378 #ifdef IXL_DEBUG_SYSCTL
4379 static int
4380 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4381 {
4382 	struct ixl_pf	*pf;
4383 	int		error, input = 0;
4384 
4385 	error = sysctl_handle_int(oidp, &input, 0, req);
4386 
4387 	if (error || !req->newptr)
4388 		return (error);
4389 
4390 	if (input == 1) {
4391 		pf = (struct ixl_pf *)arg1;
4392 		ixl_print_debug_info(pf);
4393 	}
4394 
4395 	return (error);
4396 }
4397 
4398 static void
4399 ixl_print_debug_info(struct ixl_pf *pf)
4400 {
4401 	struct i40e_hw		*hw = &pf->hw;
4402 	struct ixl_vsi		*vsi = &pf->vsi;
4403 	struct ixl_queue	*que = vsi->queues;
4404 	struct rx_ring		*rxr = &que->rxr;
4405 	struct tx_ring		*txr = &que->txr;
4406 	u32			reg;
4407 
4408 
4409 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4410 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4411 	printf("RX next check = %x\n", rxr->next_check);
4412 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4413 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4414 	printf("TX desc avail = %x\n", txr->avail);
4415 
	reg = rd32(hw, I40E_GLV_GORCL(0xc));
	printf("RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
	printf("Port RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLV_RDPC(0xc));
	printf("RX discard = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
	printf("Port RX discard = %x\n", reg);

	reg = rd32(hw, I40E_GLV_TEPC(0xc));
	printf("TX errors = %x\n", reg);
	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
	printf("TX Bytes = %x\n", reg);

	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
	printf("RX undersize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
	printf("RX fragments = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
	printf("RX oversize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
	printf("RX length error = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
	printf("mac remote fault = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
	printf("mac local fault = %x\n", reg);
4442 }
4443 #endif
4444 
4445 /**
4446  * Update VSI-specific ethernet statistics counters.
4447  **/
4448 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4449 {
4450 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4451 	struct i40e_hw *hw = &pf->hw;
4452 	struct i40e_eth_stats *es;
4453 	struct i40e_eth_stats *oes;
4454 	struct i40e_hw_port_stats *nsd;
4455 	u16 stat_idx = vsi->info.stat_counter_idx;
4456 
4457 	es = &vsi->eth_stats;
4458 	oes = &vsi->eth_stats_offsets;
4459 	nsd = &pf->stats;
4460 
4461 	/* Gather up the stats that the hw collects */
4462 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4463 			   vsi->stat_offsets_loaded,
4464 			   &oes->tx_errors, &es->tx_errors);
4465 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4466 			   vsi->stat_offsets_loaded,
4467 			   &oes->rx_discards, &es->rx_discards);
4468 
4469 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4470 			   I40E_GLV_GORCL(stat_idx),
4471 			   vsi->stat_offsets_loaded,
4472 			   &oes->rx_bytes, &es->rx_bytes);
4473 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4474 			   I40E_GLV_UPRCL(stat_idx),
4475 			   vsi->stat_offsets_loaded,
4476 			   &oes->rx_unicast, &es->rx_unicast);
4477 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4478 			   I40E_GLV_MPRCL(stat_idx),
4479 			   vsi->stat_offsets_loaded,
4480 			   &oes->rx_multicast, &es->rx_multicast);
4481 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4482 			   I40E_GLV_BPRCL(stat_idx),
4483 			   vsi->stat_offsets_loaded,
4484 			   &oes->rx_broadcast, &es->rx_broadcast);
4485 
4486 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4487 			   I40E_GLV_GOTCL(stat_idx),
4488 			   vsi->stat_offsets_loaded,
4489 			   &oes->tx_bytes, &es->tx_bytes);
4490 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4491 			   I40E_GLV_UPTCL(stat_idx),
4492 			   vsi->stat_offsets_loaded,
4493 			   &oes->tx_unicast, &es->tx_unicast);
4494 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4495 			   I40E_GLV_MPTCL(stat_idx),
4496 			   vsi->stat_offsets_loaded,
4497 			   &oes->tx_multicast, &es->tx_multicast);
4498 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4499 			   I40E_GLV_BPTCL(stat_idx),
4500 			   vsi->stat_offsets_loaded,
4501 			   &oes->tx_broadcast, &es->tx_broadcast);
4502 	vsi->stat_offsets_loaded = true;
4503 }
4504 
4505 static void
4506 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4507 {
4508 	struct ixl_pf		*pf;
4509 	struct ifnet		*ifp;
4510 	struct i40e_eth_stats	*es;
4511 	u64			tx_discards;
4512 
4513 	struct i40e_hw_port_stats *nsd;
4514 
4515 	pf = vsi->back;
4516 	ifp = vsi->ifp;
4517 	es = &vsi->eth_stats;
4518 	nsd = &pf->stats;
4519 
4520 	ixl_update_eth_stats(vsi);
4521 
4522 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4523 	for (int i = 0; i < vsi->num_queues; i++)
4524 		tx_discards += vsi->queues[i].txr.br->br_drops;
4525 
4526 	/* Update ifnet stats */
4527 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4528 	                   es->rx_multicast +
4529 			   es->rx_broadcast);
4530 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4531 	                   es->tx_multicast +
4532 			   es->tx_broadcast);
4533 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4534 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4535 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4536 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4537 
4538 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4539 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4540 	    nsd->rx_jabber);
4541 	IXL_SET_OERRORS(vsi, es->tx_errors);
4542 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4543 	IXL_SET_OQDROPS(vsi, tx_discards);
4544 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4545 	IXL_SET_COLLISIONS(vsi, 0);
4546 }
4547 
4548 /**
4549  * Reset all of the stats for the given pf
4550  **/
4551 void ixl_pf_reset_stats(struct ixl_pf *pf)
4552 {
4553 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4554 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4555 	pf->stat_offsets_loaded = false;
4556 }
4557 
4558 /**
4559  * Resets all stats of the given vsi
4560  **/
4561 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4562 {
4563 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4564 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4565 	vsi->stat_offsets_loaded = false;
4566 }
4567 
4568 /**
4569  * Read and update a 48 bit stat from the hw
4570  *
4571  * Since the device stats are not reset at PFReset, they likely will not
4572  * be zeroed when the driver starts.  We'll save the first values read
4573  * and use them as offsets to be subtracted from the raw values in order
4574  * to report stats that count from zero.
4575  **/
4576 static void
4577 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4578 	bool offset_loaded, u64 *offset, u64 *stat)
4579 {
4580 	u64 new_data;
4581 
4582 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4583 	new_data = rd64(hw, loreg);
4584 #else
4585 	/*
4586 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4587 	 * 10 don't support 8 byte bus reads/writes.
4588 	 */
4589 	new_data = rd32(hw, loreg);
4590 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4591 #endif
4592 
4593 	if (!offset_loaded)
4594 		*offset = new_data;
4595 	if (new_data >= *offset)
4596 		*stat = new_data - *offset;
4597 	else
4598 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4599 	*stat &= 0xFFFFFFFFFFFFULL;
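	/*
	** Wrap example (illustrative): with a saved offset of
	** 0xFFFFFFFFFFF0 and a post-wrap reading of 0x10, the
	** reported stat is 0x10 + 2^48 - 0xFFFFFFFFFFF0 = 0x20.
	*/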
4600 }
4601 
4602 /**
4603  * Read and update a 32 bit stat from the hw
4604  **/
4605 static void
4606 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4607 	bool offset_loaded, u64 *offset, u64 *stat)
4608 {
4609 	u32 new_data;
4610 
4611 	new_data = rd32(hw, reg);
4612 	if (!offset_loaded)
4613 		*offset = new_data;
4614 	if (new_data >= *offset)
4615 		*stat = (u32)(new_data - *offset);
4616 	else
4617 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4618 }
4619 
4620 /*
4621 ** Set flow control using sysctl:
4622 ** 	0 - off
4623 **	1 - rx pause
4624 **	2 - tx pause
4625 **	3 - full
4626 */
4627 static int
4628 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4629 {
4630 	/*
4631 	 * TODO: ensure flow control is disabled if
4632 	 * priority flow control is enabled
4633 	 *
4634 	 * TODO: ensure tx CRC by hardware should be enabled
4635 	 * if tx flow control is enabled.
4636 	 */
4637 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4638 	struct i40e_hw *hw = &pf->hw;
4639 	device_t dev = pf->dev;
4640 	int error = 0;
4641 	enum i40e_status_code aq_error = 0;
4642 	u8 fc_aq_err = 0;
4643 
4644 	/* Get request */
4645 	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4646 	if ((error) || (req->newptr == NULL))
4647 		return (error);
4648 	if (pf->fc < 0 || pf->fc > 3) {
4649 		device_printf(dev,
4650 		    "Invalid fc mode; valid modes are 0 through 3\n");
4651 		return (EINVAL);
4652 	}
4653 
4654 	/*
4655 	** Changing flow control mode currently does not work on
4656 	** 40GBASE-CR4 PHYs
4657 	*/
4658 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4659 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4660 		device_printf(dev, "Changing flow control mode unsupported"
4661 		    " on 40GBase-CR4 media.\n");
4662 		return (ENODEV);
4663 	}
4664 
4665 	/* Set fc ability for port */
4666 	hw->fc.requested_mode = pf->fc;
4667 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4668 	if (aq_error) {
4669 		device_printf(dev,
4670 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4671 		    __func__, aq_error, fc_aq_err);
4672 		return (EAGAIN);
4673 	}
4674 
4675 	return (0);
4676 }
4677 
4678 static int
4679 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4680 {
4681 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4682 	struct i40e_hw *hw = &pf->hw;
4683 	int error = 0, index = 0;
4684 
4685 	char *speeds[] = {
4686 		"Unknown",
4687 		"100M",
4688 		"1G",
4689 		"10G",
4690 		"40G",
4691 		"20G"
4692 	};
4693 
4694 	ixl_update_link_status(pf);
4695 
4696 	switch (hw->phy.link_info.link_speed) {
4697 	case I40E_LINK_SPEED_100MB:
4698 		index = 1;
4699 		break;
4700 	case I40E_LINK_SPEED_1GB:
4701 		index = 2;
4702 		break;
4703 	case I40E_LINK_SPEED_10GB:
4704 		index = 3;
4705 		break;
4706 	case I40E_LINK_SPEED_40GB:
4707 		index = 4;
4708 		break;
4709 	case I40E_LINK_SPEED_20GB:
4710 		index = 5;
4711 		break;
4712 	case I40E_LINK_SPEED_UNKNOWN:
4713 	default:
4714 		index = 0;
4715 		break;
4716 	}
4717 
4718 	error = sysctl_handle_string(oidp, speeds[index],
4719 	    strlen(speeds[index]), req);
4720 	return (error);
4721 }
4722 
4723 static int
4724 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4725 {
4726 	struct i40e_hw *hw = &pf->hw;
4727 	device_t dev = pf->dev;
4728 	struct i40e_aq_get_phy_abilities_resp abilities;
4729 	struct i40e_aq_set_phy_config config;
4730 	enum i40e_status_code aq_error = 0;
4731 
4732 	/* Get current capability information */
4733 	aq_error = i40e_aq_get_phy_capabilities(hw,
4734 	    FALSE, FALSE, &abilities, NULL);
4735 	if (aq_error) {
4736 		device_printf(dev,
4737 		    "%s: Error getting phy capabilities %d,"
4738 		    " aq error: %d\n", __func__, aq_error,
4739 		    hw->aq.asq_last_status);
4740 		return (EAGAIN);
4741 	}
4742 
4743 	/* Prepare new config */
4744 	bzero(&config, sizeof(config));
4745 	config.phy_type = abilities.phy_type;
4746 	config.abilities = abilities.abilities
4747 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4748 	config.eee_capability = abilities.eee_capability;
4749 	config.eeer = abilities.eeer_val;
4750 	config.low_power_ctrl = abilities.d3_lpan;
4751 	/* Translate into aq cmd link_speed */
4752 	if (speeds & 0x8)
4753 		config.link_speed |= I40E_LINK_SPEED_20GB;
4754 	if (speeds & 0x4)
4755 		config.link_speed |= I40E_LINK_SPEED_10GB;
4756 	if (speeds & 0x2)
4757 		config.link_speed |= I40E_LINK_SPEED_1GB;
4758 	if (speeds & 0x1)
4759 		config.link_speed |= I40E_LINK_SPEED_100MB;
4760 
4761 	/* Do aq command & restart link */
4762 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4763 	if (aq_error) {
4764 		device_printf(dev,
4765 		    "%s: Error setting new phy config %d,"
4766 		    " aq error: %d\n", __func__, aq_error,
4767 		    hw->aq.asq_last_status);
4768 		return (EAGAIN);
4769 	}
4770 
4771 	/*
	** This seems a bit heavy-handed, but we
	** need to get a reinit on some devices.
4774 	*/
4775 	IXL_PF_LOCK(pf);
4776 	ixl_stop(pf);
4777 	ixl_init_locked(pf);
4778 	IXL_PF_UNLOCK(pf);
4779 
4780 	return (0);
4781 }
4782 
4783 /*
4784 ** Control link advertise speed:
4785 **	Flags:
4786 **	0x1 - advertise 100 Mb
4787 **	0x2 - advertise 1G
4788 **	0x4 - advertise 10G
4789 **	0x8 - advertise 20G
4790 **
4791 ** Does not work on 40G devices.
4792 */
4793 static int
4794 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4795 {
4796 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4797 	struct i40e_hw *hw = &pf->hw;
4798 	device_t dev = pf->dev;
4799 	int requested_ls = 0;
4800 	int error = 0;
4801 
4802 	/*
4803 	** FW doesn't support changing advertised speed
4804 	** for 40G devices; speed is always 40G.
4805 	*/
4806 	if (i40e_is_40G_device(hw->device_id))
4807 		return (ENODEV);
4808 
4809 	/* Read in new mode */
4810 	requested_ls = pf->advertised_speed;
4811 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4812 	if ((error) || (req->newptr == NULL))
4813 		return (error);
4814 	/* Check for sane value */
4815 	if (requested_ls < 0x1 || requested_ls > 0xE) {
4816 		device_printf(dev, "Invalid advertised speed; "
4817 		    "valid modes are 0x1 through 0xE\n");
4818 		return (EINVAL);
4819 	}
4820 	/* Then check for validity based on adapter type */
4821 	switch (hw->device_id) {
4822 	case I40E_DEV_ID_10G_BASE_T:
4823 		if (requested_ls & 0x8) {
4824 			device_printf(dev,
4825 			    "20Gbs speed not supported on this device.\n");
4826 			return (EINVAL);
4827 		}
4828 		break;
4829 	case I40E_DEV_ID_20G_KR2:
4830 		if (requested_ls & 0x1) {
4831 			device_printf(dev,
4832 			    "100Mbs speed not supported on this device.\n");
4833 			return (EINVAL);
4834 		}
4835 		break;
4836 	default:
4837 		if (requested_ls & ~0x6) {
4838 			device_printf(dev,
4839 			    "Only 1/10Gbs speeds are supported on this device.\n");
4840 			return (EINVAL);
4841 		}
4842 		break;
4843 	}
4844 
4845 	/* Exit if no change */
4846 	if (pf->advertised_speed == requested_ls)
4847 		return (0);
4848 
4849 	error = ixl_set_advertised_speeds(pf, requested_ls);
4850 	if (error)
4851 		return (error);
4852 
4853 	pf->advertised_speed = requested_ls;
4854 	ixl_update_link_status(pf);
4855 	return (0);
4856 }
4857 
4858 /*
4859 ** Get the width and transaction speed of
4860 ** the bus this adapter is plugged into.
4861 */
4862 static u16
4863 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4864 {
4865         u16                     link;
4866         u32                     offset;
4867 
4868 
4869         /* Get the PCI Express Capabilities offset */
4870         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4871 
4872         /* ...and read the Link Status Register */
4873         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4874 
4875         switch (link & I40E_PCI_LINK_WIDTH) {
4876         case I40E_PCI_LINK_WIDTH_1:
4877                 hw->bus.width = i40e_bus_width_pcie_x1;
4878                 break;
4879         case I40E_PCI_LINK_WIDTH_2:
4880                 hw->bus.width = i40e_bus_width_pcie_x2;
4881                 break;
4882         case I40E_PCI_LINK_WIDTH_4:
4883                 hw->bus.width = i40e_bus_width_pcie_x4;
4884                 break;
4885         case I40E_PCI_LINK_WIDTH_8:
4886                 hw->bus.width = i40e_bus_width_pcie_x8;
4887                 break;
4888         default:
4889                 hw->bus.width = i40e_bus_width_unknown;
4890                 break;
4891         }
4892 
4893         switch (link & I40E_PCI_LINK_SPEED) {
4894         case I40E_PCI_LINK_SPEED_2500:
4895                 hw->bus.speed = i40e_bus_speed_2500;
4896                 break;
4897         case I40E_PCI_LINK_SPEED_5000:
4898                 hw->bus.speed = i40e_bus_speed_5000;
4899                 break;
4900         case I40E_PCI_LINK_SPEED_8000:
4901                 hw->bus.speed = i40e_bus_speed_8000;
4902                 break;
4903         default:
4904                 hw->bus.speed = i40e_bus_speed_unknown;
4905                 break;
4906         }
4907 
4908 
4909         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4910             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4911             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4912             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4913             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4914             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4915             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4916             ("Unknown"));
4917 
4918         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4919             (hw->bus.speed < i40e_bus_speed_8000)) {
4920                 device_printf(dev, "PCI-Express bandwidth available"
4921                     " for this device\n     may be insufficient for"
4922                     " optimal performance.\n");
4923                 device_printf(dev, "For expected performance a x8 "
4924                     "PCIE Gen3 slot is required.\n");
4925         }
4926 
4927         return (link);
4928 }
4929 
4930 static int
4931 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4932 {
4933 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4934 	struct i40e_hw	*hw = &pf->hw;
4935 	char		buf[32];
4936 
4937 	snprintf(buf, sizeof(buf),
4938 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4939 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4940 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4941 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4942 	    IXL_NVM_VERSION_HI_SHIFT,
4943 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4944 	    IXL_NVM_VERSION_LO_SHIFT,
4945 	    hw->nvm.eetrack);
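	/*
	** Renders firmware, AQ API, NVM versions and the EETRACK id,
	** e.g. "f4.33 a1.2 n04.42 e80001234" (illustrative values).
	*/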
4946 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4947 }
4948 
4949 
4950 #ifdef IXL_DEBUG_SYSCTL
4951 static int
4952 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4953 {
4954 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4955 	struct i40e_hw *hw = &pf->hw;
4956 	struct i40e_link_status link_status;
4957 	char buf[512];
4958 
4959 	enum i40e_status_code aq_error = 0;
4960 
4961 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4962 	if (aq_error) {
4963 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4964 		return (EPERM);
4965 	}
4966 
4967 	sprintf(buf, "\n"
4968 	    "PHY Type : %#04x\n"
4969 	    "Speed    : %#04x\n"
4970 	    "Link info: %#04x\n"
4971 	    "AN info  : %#04x\n"
4972 	    "Ext info : %#04x",
4973 	    link_status.phy_type, link_status.link_speed,
4974 	    link_status.link_info, link_status.an_info,
4975 	    link_status.ext_info);
4976 
4977 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4978 }
4979 
4980 static int
4981 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4982 {
4983 	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
4984 	struct i40e_hw		*hw = &pf->hw;
4985 	char			buf[512];
4986 	enum i40e_status_code	aq_error = 0;
4987 
4988 	struct i40e_aq_get_phy_abilities_resp abilities;
4989 
4990 	aq_error = i40e_aq_get_phy_capabilities(hw,
4991 	    TRUE, FALSE, &abilities, NULL);
4992 	if (aq_error) {
4993 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4994 		return (EPERM);
4995 	}
4996 
4997 	sprintf(buf, "\n"
4998 	    "PHY Type : %#010x\n"
4999 	    "Speed    : %#04x\n"
5000 	    "Abilities: %#04x\n"
5001 	    "EEE cap  : %#06x\n"
5002 	    "EEER reg : %#010x\n"
5003 	    "D3 Lpan  : %#04x",
5004 	    abilities.phy_type, abilities.link_speed,
5005 	    abilities.abilities, abilities.eee_capability,
5006 	    abilities.eeer_val, abilities.d3_lpan);
5007 
5008 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5009 }
5010 
5011 static int
5012 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5013 {
5014 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5015 	struct ixl_vsi *vsi = &pf->vsi;
5016 	struct ixl_mac_filter *f;
5017 	char *buf, *buf_i;
5018 
5019 	int error = 0;
5020 	int ftl_len = 0;
5021 	int ftl_counter = 0;
5022 	int buf_len = 0;
5023 	int entry_len = 42;
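	/*
	** entry_len = 42: 17 (MAC, colon form) + 7 (", vlan ") +
	** 4 (%4d) + 8 (", flags ") + 6 (%#06x), assuming MAC_FORMAT
	** renders the usual 17-character form.
	*/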
5024 
5025 	SLIST_FOREACH(f, &vsi->ftl, next) {
5026 		ftl_len++;
5027 	}
5028 
5029 	if (ftl_len < 1) {
5030 		sysctl_handle_string(oidp, "(none)", 6, req);
5031 		return (0);
5032 	}
5033 
5034 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		device_printf(pf->dev, "Could not allocate filter list buffer.\n");
		return (ENOMEM);
	}
5036 
5037 	sprintf(buf_i++, "\n");
5038 	SLIST_FOREACH(f, &vsi->ftl, next) {
5039 		sprintf(buf_i,
5040 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5041 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5042 		buf_i += entry_len;
5043 		/* don't print '\n' for last entry */
5044 		if (++ftl_counter != ftl_len) {
5045 			sprintf(buf_i, "\n");
5046 			buf_i++;
5047 		}
5048 	}
5049 
5050 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5051 	if (error)
5052 		printf("sysctl error: %d\n", error);
5053 	free(buf, M_DEVBUF);
	return (error);
5055 }
5056 
5057 #define IXL_SW_RES_SIZE 0x14
5058 static int
5059 ixl_res_alloc_cmp(const void *a, const void *b)
5060 {
5061 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5062 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5063 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5064 
5065 	return ((int)one->resource_type - (int)two->resource_type);
5066 }
5067 
5068 static int
5069 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5070 {
5071 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5072 	struct i40e_hw *hw = &pf->hw;
5073 	device_t dev = pf->dev;
5074 	struct sbuf *buf;
5075 	int error = 0;
5076 
5077 	u8 num_entries;
5078 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5079 
5080 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5081 	if (!buf) {
5082 		device_printf(dev, "Could not allocate sbuf for output.\n");
5083 		return (ENOMEM);
5084 	}
5085 
5086 	bzero(resp, sizeof(resp));
5087 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5088 				resp,
5089 				IXL_SW_RES_SIZE,
5090 				NULL);
5091 	if (error) {
5092 		device_printf(dev,
5093 		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5094 		    __func__, error, hw->aq.asq_last_status);
5095 		sbuf_delete(buf);
		return (error);
5097 	}
5098 
5099 	/* Sort entries by type for display */
5100 	qsort(resp, num_entries,
5101 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5102 	    &ixl_res_alloc_cmp);
5103 
5104 	sbuf_cat(buf, "\n");
5105 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5106 	sbuf_printf(buf,
5107 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5108 	    "     | (this)     | (all) | (this) | (all)       \n");
5109 	for (int i = 0; i < num_entries; i++) {
5110 		sbuf_printf(buf,
5111 		    "%#4x | %10d   %5d   %6d   %12d",
5112 		    resp[i].resource_type,
5113 		    resp[i].guaranteed,
5114 		    resp[i].total,
5115 		    resp[i].used,
5116 		    resp[i].total_unalloced);
5117 		if (i < num_entries - 1)
5118 			sbuf_cat(buf, "\n");
5119 	}
5120 
5121 	error = sbuf_finish(buf);
5122 	sbuf_delete(buf);
5123 
5124 	return (error);
5125 }
5126 
5127 /*
5128 ** Caller must init and delete sbuf; this function will clear and
5129 ** finish it for caller.
5130 */
5131 static char *
5132 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5133 {
5134 	sbuf_clear(s);
5135 
5136 	if (seid == 0 && uplink)
5137 		sbuf_cat(s, "Network");
5138 	else if (seid == 0)
5139 		sbuf_cat(s, "Host");
5140 	else if (seid == 1)
5141 		sbuf_cat(s, "EMP");
5142 	else if (seid <= 5)
5143 		sbuf_printf(s, "MAC %d", seid - 2);
5144 	else if (seid <= 15)
5145 		sbuf_cat(s, "Reserved");
5146 	else if (seid <= 31)
5147 		sbuf_printf(s, "PF %d", seid - 16);
5148 	else if (seid <= 159)
5149 		sbuf_printf(s, "VF %d", seid - 32);
5150 	else if (seid <= 287)
5151 		sbuf_cat(s, "Reserved");
5152 	else if (seid <= 511)
5153 		sbuf_cat(s, "Other"); // for other structures
5154 	else if (seid <= 895)
5155 		sbuf_printf(s, "VSI %d", seid - 512);
5156 	else if (seid <= 1023)
5157 		sbuf_printf(s, "Reserved");
5158 	else
5159 		sbuf_cat(s, "Invalid");
5160 
5161 	sbuf_finish(s);
	return (sbuf_data(s));
5163 }
5164 
5165 static int
5166 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5167 {
5168 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5169 	struct i40e_hw *hw = &pf->hw;
5170 	device_t dev = pf->dev;
5171 	struct sbuf *buf;
5172 	struct sbuf *nmbuf;
5173 	int error = 0;
5174 	u8 aq_buf[I40E_AQ_LARGE_BUF];
5175 
5176 	u16 next = 0;
5177 	struct i40e_aqc_get_switch_config_resp *sw_config;
5178 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5179 
5180 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5181 	if (!buf) {
5182 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5183 		return (ENOMEM);
5184 	}
5185 
5186 	error = i40e_aq_get_switch_config(hw, sw_config,
5187 	    sizeof(aq_buf), &next, NULL);
5188 	if (error) {
5189 		device_printf(dev,
5190 		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5191 		    __func__, error, hw->aq.asq_last_status);
5192 		sbuf_delete(buf);
		return (error);
5194 	}
5195 
	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}
5201 
5202 	sbuf_cat(buf, "\n");
5203 	// Assuming <= 255 elements in switch
5204 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5205 	/* Exclude:
5206 	** Revision -- all elements are revision 1 for now
5207 	*/
5208 	sbuf_printf(buf,
5209 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5210 	    "                |          |          | (uplink)\n");
5211 	for (int i = 0; i < sw_config->header.num_reported; i++) {
5212 		// "%4d (%8s) | %8s   %8s   %#8x",
5213 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5214 		sbuf_cat(buf, " ");
5215 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5216 		    sw_config->element[i].seid, false));
5217 		sbuf_cat(buf, " | ");
5218 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5219 		    sw_config->element[i].uplink_seid, true));
5220 		sbuf_cat(buf, "   ");
5221 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5222 		    sw_config->element[i].downlink_seid, false));
5223 		sbuf_cat(buf, "   ");
5224 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5225 		if (i < sw_config->header.num_reported - 1)
5226 			sbuf_cat(buf, "\n");
5227 	}
5228 	sbuf_delete(nmbuf);
5229 
5230 	error = sbuf_finish(buf);
5231 	sbuf_delete(buf);
5232 
5233 	return (error);
5234 }
5235 #endif /* IXL_DEBUG_SYSCTL */
5236 
5237 
5238 #ifdef PCI_IOV
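/*
 * Build the VSI backing a single VF: attach it to the PF's VEB,
 * optionally enable MAC anti-spoof checking, pass VLAN tags through
 * unmodified, and hand the VF a non-contiguous block of
 * IXLV_MAX_QUEUES queues carved out above the PF's own queues.
 */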
5239 static int
5240 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5241 {
5242 	struct i40e_hw *hw;
5243 	struct ixl_vsi *vsi;
5244 	struct i40e_vsi_context vsi_ctx;
5245 	int i;
5246 	uint16_t first_queue;
5247 	enum i40e_status_code code;
5248 
5249 	hw = &pf->hw;
5250 	vsi = &pf->vsi;
5251 
5252 	vsi_ctx.pf_num = hw->pf_id;
5253 	vsi_ctx.uplink_seid = pf->veb_seid;
5254 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5255 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5256 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5257 
5258 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5259 
5260 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5261 	vsi_ctx.info.switch_id = htole16(0);
5262 
5263 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5264 	vsi_ctx.info.sec_flags = 0;
5265 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5266 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5267 
5268 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5269 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5270 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5271 
5272 	vsi_ctx.info.valid_sections |=
5273 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5274 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5275 	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5276 	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5277 		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5278 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5279 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5280 
5281 	vsi_ctx.info.tc_mapping[0] = htole16(
5282 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5283 	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5284 
5285 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5286 	if (code != I40E_SUCCESS)
5287 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5288 	vf->vsi.seid = vsi_ctx.seid;
5289 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5290 	vf->vsi.first_queue = first_queue;
5291 	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5292 
5293 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5294 	if (code != I40E_SUCCESS)
5295 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5296 
5297 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5298 	if (code != I40E_SUCCESS) {
5299 		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5300 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5301 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5302 	}
5303 
5304 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5305 	return (0);
5306 }
5307 
5308 static int
5309 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5310 {
5311 	struct i40e_hw *hw;
5312 	int error;
5313 
5314 	hw = &pf->hw;
5315 
5316 	error = ixl_vf_alloc_vsi(pf, vf);
5317 	if (error != 0)
5318 		return (error);
5319 
5320 	vf->vsi.hw_filters_add = 0;
5321 	vf->vsi.hw_filters_del = 0;
5322 	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5323 	ixl_reconfigure_filters(&vf->vsi);
5324 
5325 	return (0);
5326 }
5327 
5328 static void
5329 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5330     uint32_t val)
5331 {
5332 	uint32_t qtable;
5333 	int index, shift;
5334 
5335 	/*
5336 	 * Two queues are mapped in a single register, so we have to do some
5337 	 * gymnastics to convert the queue number into a register index and
5338 	 * shift.
5339 	 */
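	/*
	 * For example, qnum 5 lands in VSILAN_QTABLE[2] shifted into the
	 * QINDEX_1 field, while qnum 4 shares that register in the
	 * QINDEX_0 field.
	 */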
5340 	index = qnum / 2;
5341 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5342 
5343 	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5344 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5345 	qtable |= val << shift;
5346 	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5347 }
5348 
5349 static void
5350 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5351 {
5352 	struct i40e_hw *hw;
5353 	uint32_t qtable;
5354 	int i;
5355 
5356 	hw = &pf->hw;
5357 
5358 	/*
5359 	 * Contiguous mappings aren't actually supported by the hardware,
5360 	 * so we have to use non-contiguous mappings.
5361 	 */
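	/*
	 * The mapping is two-level: VPLAN_QTABLE translates the VF's queue
	 * indices to global queue numbers, and VSILAN_QTABLE (written via
	 * ixl_vf_map_vsi_queue()) maps the VSI's queue indices onto the
	 * same global queues.
	 */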
5362 	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5363 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5364 
5365 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5366 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5367 
5368 	for (i = 0; i < vf->vsi.num_queues; i++) {
5369 		qtable = (vf->vsi.first_queue + i) <<
5370 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5371 
5372 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5373 	}
5374 
5375 	/* Map queues allocated to VF to its VSI. */
5376 	for (i = 0; i < vf->vsi.num_queues; i++)
5377 		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5378 
5379 	/* Set rest of VSI queues as unused. */
5380 	for (; i < IXL_MAX_VSI_QUEUES; i++)
5381 		ixl_vf_map_vsi_queue(hw, vf, i,
5382 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5383 
5384 	ixl_flush(hw);
5385 }
5386 
5387 static void
5388 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5389 {
5390 	struct i40e_hw *hw;
5391 
5392 	hw = &pf->hw;
5393 
5394 	if (vsi->seid == 0)
5395 		return;
5396 
5397 	i40e_aq_delete_element(hw, vsi->seid, NULL);
5398 }
5399 
5400 static void
5401 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5402 {
5403 
5404 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5405 	ixl_flush(hw);
5406 }
5407 
5408 static void
5409 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5410 {
5411 
5412 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5413 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5414 	ixl_flush(hw);
5415 }
5416 
5417 static void
5418 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5419 {
5420 	struct i40e_hw *hw;
5421 	uint32_t vfint_reg, vpint_reg;
5422 	int i;
5423 
5424 	hw = &pf->hw;
5425 
5426 	ixl_vf_vsi_release(pf, &vf->vsi);
5427 
5428 	/* Index 0 has a special register. */
5429 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5430 
5431 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5433 		ixl_vf_disable_queue_intr(hw, vfint_reg);
5434 	}
5435 
5436 	/* Index 0 has a special register. */
5437 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5438 
5439 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5440 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5441 		ixl_vf_unregister_intr(hw, vpint_reg);
5442 	}
5443 
5444 	vf->vsi.num_queues = 0;
5445 }
5446 
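/*
 * Poll the VF's "transactions pending" status through the
 * PF_PCI_CIAA/CIAD indirect register pair, waiting up to
 * IXL_VF_RESET_TIMEOUT microseconds for outstanding PCIe activity
 * to drain.
 */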
5447 static int
5448 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5449 {
5450 	struct i40e_hw *hw;
5451 	int i;
5452 	uint16_t global_vf_num;
5453 	uint32_t ciad;
5454 
5455 	hw = &pf->hw;
5456 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5457 
5458 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5459 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5460 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5461 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5462 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5463 			return (0);
5464 		DELAY(1);
5465 	}
5466 
5467 	return (ETIMEDOUT);
5468 }
5469 
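/*
 * Trigger a software reset of one VF by asserting VFSWR in
 * VPGEN_VFRTRIG, then rebuild its state via ixl_reinit_vf() below.
 */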
5470 static void
5471 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5472 {
5473 	struct i40e_hw *hw;
5474 	uint32_t vfrtrig;
5475 
5476 	hw = &pf->hw;
5477 
5478 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5479 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5480 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5481 	ixl_flush(hw);
5482 
5483 	ixl_reinit_vf(pf, vf);
5484 }
5485 
5486 static void
5487 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5488 {
5489 	struct i40e_hw *hw;
5490 	uint32_t vfrstat, vfrtrig;
5491 	int i, error;
5492 
5493 	hw = &pf->hw;
5494 
5495 	error = ixl_flush_pcie(pf, vf);
5496 	if (error != 0)
5497 		device_printf(pf->dev,
5498 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5499 		    vf->vf_num);
5500 
5501 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5502 		DELAY(10);
5503 
5504 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5505 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5506 			break;
5507 	}
5508 
5509 	if (i == IXL_VF_RESET_TIMEOUT)
5510 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5511 
5512 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5513 
5514 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5515 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5516 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5517 
5518 	if (vf->vsi.seid != 0)
5519 		ixl_disable_rings(&vf->vsi);
5520 
5521 	ixl_vf_release_resources(pf, vf);
5522 	ixl_vf_setup_vsi(pf, vf);
5523 	ixl_vf_map_queues(pf, vf);
5524 
5525 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5526 	ixl_flush(hw);
5527 }
5528 
5529 static const char *
5530 ixl_vc_opcode_str(uint16_t op)
5531 {
5532 
5533 	switch (op) {
5534 	case I40E_VIRTCHNL_OP_VERSION:
5535 		return ("VERSION");
5536 	case I40E_VIRTCHNL_OP_RESET_VF:
5537 		return ("RESET_VF");
5538 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5539 		return ("GET_VF_RESOURCES");
5540 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5541 		return ("CONFIG_TX_QUEUE");
5542 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5543 		return ("CONFIG_RX_QUEUE");
5544 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5545 		return ("CONFIG_VSI_QUEUES");
5546 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5547 		return ("CONFIG_IRQ_MAP");
5548 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5549 		return ("ENABLE_QUEUES");
5550 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5551 		return ("DISABLE_QUEUES");
5552 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5553 		return ("ADD_ETHER_ADDRESS");
5554 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5555 		return ("DEL_ETHER_ADDRESS");
5556 	case I40E_VIRTCHNL_OP_ADD_VLAN:
5557 		return ("ADD_VLAN");
5558 	case I40E_VIRTCHNL_OP_DEL_VLAN:
5559 		return ("DEL_VLAN");
5560 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5561 		return ("CONFIG_PROMISCUOUS_MODE");
5562 	case I40E_VIRTCHNL_OP_GET_STATS:
5563 		return ("GET_STATS");
5564 	case I40E_VIRTCHNL_OP_FCOE:
5565 		return ("FCOE");
5566 	case I40E_VIRTCHNL_OP_EVENT:
5567 		return ("EVENT");
5568 	default:
5569 		return ("UNKNOWN");
5570 	}
5571 }
5572 
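/*
 * Debug-log level for each virtchnl opcode: GET_STATS is logged at a
 * higher level than the other opcodes, presumably because VF drivers
 * poll it continuously and it would otherwise flood the log.
 */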
5573 static int
5574 ixl_vc_opcode_level(uint16_t opcode)
5575 {
5576 
5577 	switch (opcode) {
5578 	case I40E_VIRTCHNL_OP_GET_STATS:
5579 		return (10);
5580 	default:
5581 		return (5);
5582 	}
5583 }
5584 
5585 static void
5586 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5587     enum i40e_status_code status, void *msg, uint16_t len)
5588 {
5589 	struct i40e_hw *hw;
5590 	int global_vf_id;
5591 
5592 	hw = &pf->hw;
5593 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5594 
5595 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5596 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5597 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5598 
5599 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5600 }
5601 
5602 static void
5603 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5604 {
5605 
5606 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5607 }
5608 
5609 static void
5610 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5611     enum i40e_status_code status, const char *file, int line)
5612 {
5613 
5614 	I40E_VC_DEBUG(pf, 1,
5615 	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5616 	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5617 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5618 }
5619 
5620 static void
5621 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5622     uint16_t msg_size)
5623 {
5624 	struct i40e_virtchnl_version_info reply;
5625 
5626 	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5627 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5628 		    I40E_ERR_PARAM);
5629 		return;
5630 	}
5631 
5632 	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5633 	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5634 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5635 	    sizeof(reply));
5636 }
5637 
5638 static void
5639 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5640     uint16_t msg_size)
5641 {
5642 
5643 	if (msg_size != 0) {
5644 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5645 		    I40E_ERR_PARAM);
5646 		return;
5647 	}
5648 
5649 	ixl_reset_vf(pf, vf);
5650 
5651 	/* No response to a reset message. */
5652 }
5653 
5654 static void
5655 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5656     uint16_t msg_size)
5657 {
5658 	struct i40e_virtchnl_vf_resource reply;
5659 
5660 	if (msg_size != 0) {
5661 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5662 		    I40E_ERR_PARAM);
5663 		return;
5664 	}
5665 
5666 	bzero(&reply, sizeof(reply));
5667 
5668 	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5669 
5670 	reply.num_vsis = 1;
5671 	reply.num_queue_pairs = vf->vsi.num_queues;
5672 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5673 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5674 	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5675 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5676 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5677 
5678 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5679 	    I40E_SUCCESS, &reply, sizeof(reply));
5680 }
5681 
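/*
 * Program the HMC TX queue context for one VF queue, then tag the
 * queue as VF-owned in QTX_CTL so the hardware associates it with
 * the correct function.
 */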
5682 static int
5683 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5684     struct i40e_virtchnl_txq_info *info)
5685 {
5686 	struct i40e_hw *hw;
5687 	struct i40e_hmc_obj_txq txq;
5688 	uint16_t global_queue_num, global_vf_num;
5689 	enum i40e_status_code status;
5690 	uint32_t qtx_ctl;
5691 
5692 	hw = &pf->hw;
5693 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5694 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5695 	bzero(&txq, sizeof(txq));
5696 
5697 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5698 	if (status != I40E_SUCCESS)
5699 		return (EINVAL);
5700 
5701 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5702 
5703 	txq.head_wb_ena = info->headwb_enabled;
5704 	txq.head_wb_addr = info->dma_headwb_addr;
5705 	txq.qlen = info->ring_len;
5706 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5707 	txq.rdylist_act = 0;
5708 
5709 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5710 	if (status != I40E_SUCCESS)
5711 		return (EINVAL);
5712 
5713 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5714 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5715 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5716 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5717 	ixl_flush(hw);
5718 
5719 	return (0);
5720 }
5721 
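/*
 * Validate and program the HMC RX queue context for one VF queue.
 * Ring base and buffer sizes are written in hardware units (the ring
 * address in IXL_RX_CTX_BASE_UNITS-byte units, buffer sizes shifted
 * per the RXQ context layout), so the raw values supplied by the VF
 * are range-checked before scaling.
 */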
5722 static int
5723 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5724     struct i40e_virtchnl_rxq_info *info)
5725 {
5726 	struct i40e_hw *hw;
5727 	struct i40e_hmc_obj_rxq rxq;
5728 	uint16_t global_queue_num;
5729 	enum i40e_status_code status;
5730 
5731 	hw = &pf->hw;
5732 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5733 	bzero(&rxq, sizeof(rxq));
5734 
5735 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5736 		return (EINVAL);
5737 
5738 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5739 	    info->max_pkt_size < ETHER_MIN_LEN)
5740 		return (EINVAL);
5741 
5742 	if (info->splithdr_enabled) {
5743 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5744 			return (EINVAL);
5745 
5746 		rxq.hsplit_0 = info->rx_split_pos &
5747 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5748 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5749 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5750 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5751 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5752 
5753 		rxq.dtype = 2;
5754 	}
5755 
5756 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5757 	if (status != I40E_SUCCESS)
5758 		return (EINVAL);
5759 
5760 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5761 	rxq.qlen = info->ring_len;
5762 
5763 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5764 
5765 	rxq.dsize = 1;
5766 	rxq.crcstrip = 1;
5767 	rxq.l2tsel = 1;
5768 
5769 	rxq.rxmax = info->max_pkt_size;
5770 	rxq.tphrdesc_ena = 1;
5771 	rxq.tphwdesc_ena = 1;
5772 	rxq.tphdata_ena = 1;
5773 	rxq.tphhead_ena = 1;
5774 	rxq.lrxqthresh = 2;
5775 	rxq.prefena = 1;
5776 
5777 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5778 	if (status != I40E_SUCCESS)
5779 		return (EINVAL);
5780 
5781 	return (0);
5782 }
5783 
5784 static void
5785 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5786     uint16_t msg_size)
5787 {
5788 	struct i40e_virtchnl_vsi_queue_config_info *info;
5789 	struct i40e_virtchnl_queue_pair_info *pair;
5790 	int i;
5791 
5792 	if (msg_size < sizeof(*info)) {
5793 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5794 		    I40E_ERR_PARAM);
5795 		return;
5796 	}
5797 
5798 	info = msg;
5799 	if (info->num_queue_pairs == 0) {
5800 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5801 		    I40E_ERR_PARAM);
5802 		return;
5803 	}
5804 
5805 	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5806 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5807 		    I40E_ERR_PARAM);
5808 		return;
5809 	}
5810 
5811 	if (info->vsi_id != vf->vsi.vsi_num) {
5812 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5813 		    I40E_ERR_PARAM);
5814 		return;
5815 	}
5816 
5817 	for (i = 0; i < info->num_queue_pairs; i++) {
5818 		pair = &info->qpair[i];
5819 
5820 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5821 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5822 		    pair->txq.queue_id != pair->rxq.queue_id ||
5823 		    pair->txq.queue_id >= vf->vsi.num_queues) {
5824 
5825 			i40e_send_vf_nack(pf, vf,
5826 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5827 			return;
5828 		}
5829 
5830 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5831 			i40e_send_vf_nack(pf, vf,
5832 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5833 			return;
5834 		}
5835 
5836 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5837 			i40e_send_vf_nack(pf, vf,
5838 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5839 			return;
5840 		}
5841 	}
5842 
5843 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5844 }
5845 
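/*
 * Write one QINT_RQCTL/QINT_TQCTL entry of an interrupt linked list.
 * Each entry's NEXTQ fields point at the previously written element,
 * so the list is built back-to-front; *last_type and *last_queue
 * carry that chain state between calls.
 */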
5846 static void
5847 ixl_vf_set_qctl(struct ixl_pf *pf,
5848     const struct i40e_virtchnl_vector_map *vector,
5849     enum i40e_queue_type cur_type, uint16_t cur_queue,
5850     enum i40e_queue_type *last_type, uint16_t *last_queue)
5851 {
5852 	uint32_t offset, qctl;
5853 	uint16_t itr_indx;
5854 
5855 	if (cur_type == I40E_QUEUE_TYPE_RX) {
5856 		offset = I40E_QINT_RQCTL(cur_queue);
5857 		itr_indx = vector->rxitr_idx;
5858 	} else {
5859 		offset = I40E_QINT_TQCTL(cur_queue);
5860 		itr_indx = vector->txitr_idx;
5861 	}
5862 
5863 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5864 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5865 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5866 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5867 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5868 
5869 	wr32(&pf->hw, offset, qctl);
5870 
5871 	*last_type = cur_type;
5872 	*last_queue = cur_queue;
5873 }
5874 
5875 static void
5876 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5877     const struct i40e_virtchnl_vector_map *vector)
5878 {
5879 	struct i40e_hw *hw;
5880 	u_int qindex;
5881 	enum i40e_queue_type type, last_type;
5882 	uint32_t lnklst_reg;
5883 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5884 
5885 	hw = &pf->hw;
5886 
5887 	rxq_map = vector->rxq_map;
5888 	txq_map = vector->txq_map;
5889 
5890 	last_queue = IXL_END_OF_INTR_LNKLST;
5891 	last_type = I40E_QUEUE_TYPE_RX;
5892 
5893 	/*
	 * The datasheet says that to optimize performance, RX and TX queues
5895 	 * should be interleaved in the interrupt linked list, so we process
5896 	 * both at once here.
5897 	 */
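	/*
	 * For example (VF-relative queue numbers), txq_map 0x1 and rxq_map
	 * 0x3 are written in the order TX0, RX0, RX1; each entry points at
	 * the one written before it, so the final list is
	 * RX1 -> RX0 -> TX0 -> end, with RX1 installed as the head in
	 * VPINT_LNKLST below.
	 */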
5898 	while ((rxq_map != 0) || (txq_map != 0)) {
5899 		if (txq_map != 0) {
5900 			qindex = ffs(txq_map) - 1;
5901 			type = I40E_QUEUE_TYPE_TX;
5902 			cur_queue = vf->vsi.first_queue + qindex;
5903 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5904 			    &last_type, &last_queue);
5905 			txq_map &= ~(1 << qindex);
5906 		}
5907 
5908 		if (rxq_map != 0) {
5909 			qindex = ffs(rxq_map) - 1;
5910 			type = I40E_QUEUE_TYPE_RX;
5911 			cur_queue = vf->vsi.first_queue + qindex;
5912 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5913 			    &last_type, &last_queue);
5914 			rxq_map &= ~(1 << qindex);
5915 		}
5916 	}
5917 
5918 	if (vector->vector_id == 0)
5919 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5920 	else
5921 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5922 		    vf->vf_num);
5923 	wr32(hw, lnklst_reg,
5924 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5925 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5926 
5927 	ixl_flush(hw);
5928 }
5929 
5930 static void
5931 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5932     uint16_t msg_size)
5933 {
5934 	struct i40e_virtchnl_irq_map_info *map;
5935 	struct i40e_virtchnl_vector_map *vector;
5936 	struct i40e_hw *hw;
5937 	int i, largest_txq, largest_rxq;
5938 
5939 	hw = &pf->hw;
5940 
5941 	if (msg_size < sizeof(*map)) {
5942 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5943 		    I40E_ERR_PARAM);
5944 		return;
5945 	}
5946 
5947 	map = msg;
5948 	if (map->num_vectors == 0) {
5949 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5950 		    I40E_ERR_PARAM);
5951 		return;
5952 	}
5953 
5954 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5955 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5956 		    I40E_ERR_PARAM);
5957 		return;
5958 	}
5959 
5960 	for (i = 0; i < map->num_vectors; i++) {
5961 		vector = &map->vecmap[i];
5962 
5963 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5964 		    vector->vsi_id != vf->vsi.vsi_num) {
5965 			i40e_send_vf_nack(pf, vf,
5966 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5967 			return;
5968 		}
5969 
5970 		if (vector->rxq_map != 0) {
5971 			largest_rxq = fls(vector->rxq_map) - 1;
5972 			if (largest_rxq >= vf->vsi.num_queues) {
5973 				i40e_send_vf_nack(pf, vf,
5974 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5975 				    I40E_ERR_PARAM);
5976 				return;
5977 			}
5978 		}
5979 
5980 		if (vector->txq_map != 0) {
5981 			largest_txq = fls(vector->txq_map) - 1;
5982 			if (largest_txq >= vf->vsi.num_queues) {
5983 				i40e_send_vf_nack(pf, vf,
5984 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5985 				    I40E_ERR_PARAM);
5986 				return;
5987 			}
5988 		}
5989 
5990 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5991 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
5992 			i40e_send_vf_nack(pf, vf,
5993 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5994 			    I40E_ERR_PARAM);
5995 			return;
5996 		}
5997 
5998 		ixl_vf_config_vector(pf, vf, vector);
5999 	}
6000 
6001 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6002 }
6003 
6004 static void
6005 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6006     uint16_t msg_size)
6007 {
6008 	struct i40e_virtchnl_queue_select *select;
6009 	int error;
6010 
6011 	if (msg_size != sizeof(*select)) {
6012 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6013 		    I40E_ERR_PARAM);
6014 		return;
6015 	}
6016 
6017 	select = msg;
6018 	if (select->vsi_id != vf->vsi.vsi_num ||
6019 	    select->rx_queues == 0 || select->tx_queues == 0) {
6020 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6021 		    I40E_ERR_PARAM);
6022 		return;
6023 	}
6024 
6025 	error = ixl_enable_rings(&vf->vsi);
6026 	if (error) {
6027 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6028 		    I40E_ERR_TIMEOUT);
6029 		return;
6030 	}
6031 
6032 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6033 }
6034 
6035 static void
6036 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6037     void *msg, uint16_t msg_size)
6038 {
6039 	struct i40e_virtchnl_queue_select *select;
6040 	int error;
6041 
6042 	if (msg_size != sizeof(*select)) {
6043 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6044 		    I40E_ERR_PARAM);
6045 		return;
6046 	}
6047 
6048 	select = msg;
6049 	if (select->vsi_id != vf->vsi.vsi_num ||
6050 	    select->rx_queues == 0 || select->tx_queues == 0) {
6051 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6052 		    I40E_ERR_PARAM);
6053 		return;
6054 	}
6055 
6056 	error = ixl_disable_rings(&vf->vsi);
6057 	if (error) {
6058 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6059 		    I40E_ERR_TIMEOUT);
6060 		return;
6061 	}
6062 
6063 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6064 }
6065 
6066 static boolean_t
6067 ixl_zero_mac(const uint8_t *addr)
6068 {
6069 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6070 
6071 	return (cmp_etheraddr(addr, zero));
6072 }
6073 
6074 static boolean_t
6075 ixl_bcast_mac(const uint8_t *addr)
6076 {
6077 
6078 	return (cmp_etheraddr(addr, ixl_bcast_addr));
6079 }
6080 
6081 static int
6082 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6083 {
6084 
6085 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6086 		return (EINVAL);
6087 
6088 	/*
6089 	 * If the VF is not allowed to change its MAC address, don't let it
6090 	 * set a MAC filter for an address that is not a multicast address and
6091 	 * is not its assigned MAC.
6092 	 */
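	/*
	 * E.g. such a VF may still install 01:00:5e:xx:xx:xx IPv4
	 * multicast filters, but not a second unicast address.
	 */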
6093 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6094 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6095 		return (EPERM);
6096 
6097 	return (0);
6098 }
6099 
6100 static void
6101 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6102     uint16_t msg_size)
6103 {
6104 	struct i40e_virtchnl_ether_addr_list *addr_list;
6105 	struct i40e_virtchnl_ether_addr *addr;
6106 	struct ixl_vsi *vsi;
6107 	int i;
6108 	size_t expected_size;
6109 
6110 	vsi = &vf->vsi;
6111 
6112 	if (msg_size < sizeof(*addr_list)) {
6113 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6114 		    I40E_ERR_PARAM);
6115 		return;
6116 	}
6117 
6118 	addr_list = msg;
6119 	expected_size = sizeof(*addr_list) +
6120 	    addr_list->num_elements * sizeof(*addr);
6121 
6122 	if (addr_list->num_elements == 0 ||
6123 	    addr_list->vsi_id != vsi->vsi_num ||
6124 	    msg_size != expected_size) {
6125 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6126 		    I40E_ERR_PARAM);
6127 		return;
6128 	}
6129 
6130 	for (i = 0; i < addr_list->num_elements; i++) {
6131 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6132 			i40e_send_vf_nack(pf, vf,
6133 			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6134 			return;
6135 		}
6136 	}
6137 
6138 	for (i = 0; i < addr_list->num_elements; i++) {
6139 		addr = &addr_list->list[i];
6140 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6141 	}
6142 
6143 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6144 }
6145 
6146 static void
6147 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6148     uint16_t msg_size)
6149 {
6150 	struct i40e_virtchnl_ether_addr_list *addr_list;
6151 	struct i40e_virtchnl_ether_addr *addr;
6152 	size_t expected_size;
6153 	int i;
6154 
6155 	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6157 		    I40E_ERR_PARAM);
6158 		return;
6159 	}
6160 
6161 	addr_list = msg;
6162 	expected_size = sizeof(*addr_list) +
6163 	    addr_list->num_elements * sizeof(*addr);
6164 
6165 	if (addr_list->num_elements == 0 ||
6166 	    addr_list->vsi_id != vf->vsi.vsi_num ||
6167 	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6169 		    I40E_ERR_PARAM);
6170 		return;
6171 	}
6172 
6173 	for (i = 0; i < addr_list->num_elements; i++) {
6174 		addr = &addr_list->list[i];
6175 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6176 			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6178 			return;
6179 		}
6180 	}
6181 
6182 	for (i = 0; i < addr_list->num_elements; i++) {
6183 		addr = &addr_list->list[i];
6184 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6185 	}
6186 
6187 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6188 }
6189 
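/*
 * Update the VF's VSI so the hardware strips received VLAN tags
 * (EMOD_STR_BOTH) once the VF starts installing VLAN filters.
 */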
6190 static enum i40e_status_code
6191 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6192 {
6193 	struct i40e_vsi_context vsi_ctx;
6194 
6195 	vsi_ctx.seid = vf->vsi.seid;
6196 
6197 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6198 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6199 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6200 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6201 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6202 }
6203 
6204 static void
6205 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6206     uint16_t msg_size)
6207 {
6208 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6209 	enum i40e_status_code code;
6210 	size_t expected_size;
6211 	int i;
6212 
6213 	if (msg_size < sizeof(*filter_list)) {
6214 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6215 		    I40E_ERR_PARAM);
6216 		return;
6217 	}
6218 
6219 	filter_list = msg;
6220 	expected_size = sizeof(*filter_list) +
6221 	    filter_list->num_elements * sizeof(uint16_t);
6222 	if (filter_list->num_elements == 0 ||
6223 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6224 	    msg_size != expected_size) {
6225 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6226 		    I40E_ERR_PARAM);
6227 		return;
6228 	}
6229 
6230 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6231 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6232 		    I40E_ERR_PARAM);
6233 		return;
6234 	}
6235 
6236 	for (i = 0; i < filter_list->num_elements; i++) {
6237 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6238 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6239 			    I40E_ERR_PARAM);
6240 			return;
6241 		}
6242 	}
6243 
	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}
6249 
6250 	for (i = 0; i < filter_list->num_elements; i++)
6251 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6252 
6253 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6254 }
6255 
6256 static void
6257 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6258     uint16_t msg_size)
6259 {
6260 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6261 	int i;
6262 	size_t expected_size;
6263 
6264 	if (msg_size < sizeof(*filter_list)) {
6265 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6266 		    I40E_ERR_PARAM);
6267 		return;
6268 	}
6269 
6270 	filter_list = msg;
6271 	expected_size = sizeof(*filter_list) +
6272 	    filter_list->num_elements * sizeof(uint16_t);
6273 	if (filter_list->num_elements == 0 ||
6274 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6275 	    msg_size != expected_size) {
6276 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6277 		    I40E_ERR_PARAM);
6278 		return;
6279 	}
6280 
6281 	for (i = 0; i < filter_list->num_elements; i++) {
6282 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6284 			    I40E_ERR_PARAM);
6285 			return;
6286 		}
6287 	}
6288 
6289 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6291 		    I40E_ERR_PARAM);
6292 		return;
6293 	}
6294 
6295 	for (i = 0; i < filter_list->num_elements; i++)
6296 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6297 
6298 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6299 }
6300 
6301 static void
6302 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6303     void *msg, uint16_t msg_size)
6304 {
6305 	struct i40e_virtchnl_promisc_info *info;
6306 	enum i40e_status_code code;
6307 
6308 	if (msg_size != sizeof(*info)) {
6309 		i40e_send_vf_nack(pf, vf,
6310 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6311 		return;
6312 	}
6313 
	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6315 		i40e_send_vf_nack(pf, vf,
6316 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6317 		return;
6318 	}
6319 
6320 	info = msg;
6321 	if (info->vsi_id != vf->vsi.vsi_num) {
6322 		i40e_send_vf_nack(pf, vf,
6323 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6324 		return;
6325 	}
6326 
6327 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6328 	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6329 	if (code != I40E_SUCCESS) {
6330 		i40e_send_vf_nack(pf, vf,
6331 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6332 		return;
6333 	}
6334 
6335 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6336 	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6337 	if (code != I40E_SUCCESS) {
6338 		i40e_send_vf_nack(pf, vf,
6339 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6340 		return;
6341 	}
6342 
6343 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6344 }
6345 
6346 static void
6347 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6348     uint16_t msg_size)
6349 {
6350 	struct i40e_virtchnl_queue_select *queue;
6351 
6352 	if (msg_size != sizeof(*queue)) {
6353 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6354 		    I40E_ERR_PARAM);
6355 		return;
6356 	}
6357 
6358 	queue = msg;
6359 	if (queue->vsi_id != vf->vsi.vsi_num) {
6360 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6361 		    I40E_ERR_PARAM);
6362 		return;
6363 	}
6364 
6365 	ixl_update_eth_stats(&vf->vsi);
6366 
6367 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6368 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6369 }
6370 
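/*
 * Demultiplex one admin queue event carrying a virtchnl message: the
 * source VF is recovered from the descriptor's retval field and the
 * opcode from cookie_high, then the message is dispatched to the
 * matching handler above.
 */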
6371 static void
6372 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6373 {
6374 	struct ixl_vf *vf;
6375 	void *msg;
6376 	uint16_t vf_num, msg_size;
6377 	uint32_t opcode;
6378 
6379 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6380 	opcode = le32toh(event->desc.cookie_high);
6381 
6382 	if (vf_num >= pf->num_vfs) {
6383 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6384 		return;
6385 	}
6386 
6387 	vf = &pf->vfs[vf_num];
6388 	msg = event->msg_buf;
6389 	msg_size = event->msg_len;
6390 
6391 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6392 	    "Got msg %s(%d) from VF-%d of size %d\n",
6393 	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6394 
6395 	switch (opcode) {
6396 	case I40E_VIRTCHNL_OP_VERSION:
6397 		ixl_vf_version_msg(pf, vf, msg, msg_size);
6398 		break;
6399 	case I40E_VIRTCHNL_OP_RESET_VF:
6400 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6401 		break;
6402 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6403 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6404 		break;
6405 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6406 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6407 		break;
6408 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6409 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6410 		break;
6411 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6412 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6413 		break;
6414 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6415 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6416 		break;
6417 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6418 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6419 		break;
6420 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6421 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6422 		break;
6423 	case I40E_VIRTCHNL_OP_ADD_VLAN:
6424 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6425 		break;
6426 	case I40E_VIRTCHNL_OP_DEL_VLAN:
6427 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6428 		break;
6429 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6430 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6431 		break;
6432 	case I40E_VIRTCHNL_OP_GET_STATS:
6433 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6434 		break;
6435 
6436 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6437 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6438 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6439 	default:
6440 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6441 		break;
6442 	}
6443 }
6444 
/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
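/*
 * Each GLGEN_VFLRSTAT register holds a bit per VF, so the global VF
 * number is split into a register index and a bit mask; a set bit is
 * cleared by writing it back before the VF is reinitialized.
 */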
6446 static void
6447 ixl_handle_vflr(void *arg, int pending)
6448 {
6449 	struct ixl_pf *pf;
6450 	struct i40e_hw *hw;
6451 	uint16_t global_vf_num;
6452 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6453 	int i;
6454 
6455 	pf = arg;
6456 	hw = &pf->hw;
6457 
6458 	IXL_PF_LOCK(pf);
6459 	for (i = 0; i < pf->num_vfs; i++) {
6460 		global_vf_num = hw->func_caps.vf_base_id + i;
6461 
6462 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6463 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6464 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6465 		if (vflrstat & vflrstat_mask) {
6466 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6467 			    vflrstat_mask);
6468 
6469 			ixl_reinit_vf(pf, &pf->vfs[i]);
6470 		}
6471 	}
6472 
6473 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6474 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6475 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6476 	ixl_flush(hw);
6477 
6478 	IXL_PF_UNLOCK(pf);
6479 }
6480 
6481 static int
6482 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6483 {
6484 
6485 	switch (err) {
6486 	case I40E_AQ_RC_EPERM:
6487 		return (EPERM);
6488 	case I40E_AQ_RC_ENOENT:
6489 		return (ENOENT);
6490 	case I40E_AQ_RC_ESRCH:
6491 		return (ESRCH);
6492 	case I40E_AQ_RC_EINTR:
6493 		return (EINTR);
6494 	case I40E_AQ_RC_EIO:
6495 		return (EIO);
6496 	case I40E_AQ_RC_ENXIO:
6497 		return (ENXIO);
6498 	case I40E_AQ_RC_E2BIG:
6499 		return (E2BIG);
6500 	case I40E_AQ_RC_EAGAIN:
6501 		return (EAGAIN);
6502 	case I40E_AQ_RC_ENOMEM:
6503 		return (ENOMEM);
6504 	case I40E_AQ_RC_EACCES:
6505 		return (EACCES);
6506 	case I40E_AQ_RC_EFAULT:
6507 		return (EFAULT);
6508 	case I40E_AQ_RC_EBUSY:
6509 		return (EBUSY);
6510 	case I40E_AQ_RC_EEXIST:
6511 		return (EEXIST);
6512 	case I40E_AQ_RC_EINVAL:
6513 		return (EINVAL);
6514 	case I40E_AQ_RC_ENOTTY:
6515 		return (ENOTTY);
6516 	case I40E_AQ_RC_ENOSPC:
6517 		return (ENOSPC);
6518 	case I40E_AQ_RC_ENOSYS:
6519 		return (ENOSYS);
6520 	case I40E_AQ_RC_ERANGE:
6521 		return (ERANGE);
6522 	case I40E_AQ_RC_EFLUSHED:
6523 		return (EINVAL);	/* No exact equivalent in errno.h */
6524 	case I40E_AQ_RC_BAD_ADDR:
6525 		return (EFAULT);
6526 	case I40E_AQ_RC_EMODE:
6527 		return (EPERM);
6528 	case I40E_AQ_RC_EFBIG:
6529 		return (EFBIG);
6530 	default:
6531 		return (EINVAL);
6532 	}
6533 }
6534 
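/*
 * SR-IOV initialization entry point: allocate per-VF state and create
 * the VEB that switches traffic between the PF's VSI and the VF VSIs.
 */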
6535 static int
6536 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6537 {
6538 	struct ixl_pf *pf;
6539 	struct i40e_hw *hw;
6540 	struct ixl_vsi *pf_vsi;
6541 	enum i40e_status_code ret;
6542 	int i, error;
6543 
6544 	pf = device_get_softc(dev);
6545 	hw = &pf->hw;
6546 	pf_vsi = &pf->vsi;
6547 
6548 	IXL_PF_LOCK(pf);
6549 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6550 	    M_ZERO);
6551 
6552 	if (pf->vfs == NULL) {
6553 		error = ENOMEM;
6554 		goto fail;
6555 	}
6556 
6557 	for (i = 0; i < num_vfs; i++)
6558 		sysctl_ctx_init(&pf->vfs[i].ctx);
6559 
6560 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6561 	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6562 	if (ret != I40E_SUCCESS) {
6563 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6564 		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6565 		    error);
6566 		goto fail;
6567 	}
6568 
6569 	ixl_configure_msix(pf);
6570 	ixl_enable_adminq(hw);
6571 
6572 	pf->num_vfs = num_vfs;
6573 	IXL_PF_UNLOCK(pf);
6574 	return (0);
6575 
6576 fail:
6577 	free(pf->vfs, M_IXL);
6578 	pf->vfs = NULL;
6579 	IXL_PF_UNLOCK(pf);
6580 	return (error);
6581 }
6582 
6583 static void
6584 ixl_iov_uninit(device_t dev)
6585 {
6586 	struct ixl_pf *pf;
6587 	struct i40e_hw *hw;
6588 	struct ixl_vsi *vsi;
6589 	struct ifnet *ifp;
6590 	struct ixl_vf *vfs;
6591 	int i, num_vfs;
6592 
6593 	pf = device_get_softc(dev);
6594 	hw = &pf->hw;
6595 	vsi = &pf->vsi;
6596 	ifp = vsi->ifp;
6597 
6598 	IXL_PF_LOCK(pf);
6599 	for (i = 0; i < pf->num_vfs; i++) {
6600 		if (pf->vfs[i].vsi.seid != 0)
6601 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6602 	}
6603 
6604 	if (pf->veb_seid != 0) {
6605 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6606 		pf->veb_seid = 0;
6607 	}
6608 
6609 #if __FreeBSD_version > 1100022
6610 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6611 #else
6612 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
6613 #endif
6614 		ixl_disable_intr(vsi);
6615 
6616 	vfs = pf->vfs;
6617 	num_vfs = pf->num_vfs;
6618 
6619 	pf->vfs = NULL;
6620 	pf->num_vfs = 0;
6621 	IXL_PF_UNLOCK(pf);
6622 
6623 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6624 	for (i = 0; i < num_vfs; i++)
6625 		sysctl_ctx_free(&vfs[i].ctx);
6626 	free(vfs, M_IXL);
6627 }
6628 
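/*
 * Per-VF creation hook: apply the nvlist configuration supplied for
 * this VF ("mac-addr", "allow-set-mac", "mac-anti-spoof",
 * "allow-promisc") and bring the VF up with a reset.
 */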
6629 static int
6630 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6631 {
6632 	char sysctl_name[QUEUE_NAME_LEN];
6633 	struct ixl_pf *pf;
6634 	struct ixl_vf *vf;
6635 	const void *mac;
6636 	size_t size;
6637 	int error;
6638 
6639 	pf = device_get_softc(dev);
6640 	vf = &pf->vfs[vfnum];
6641 
6642 	IXL_PF_LOCK(pf);
6643 	vf->vf_num = vfnum;
6644 
6645 	vf->vsi.back = pf;
6646 	vf->vf_flags = VF_FLAG_ENABLED;
6647 	SLIST_INIT(&vf->vsi.ftl);
6648 
6649 	error = ixl_vf_setup_vsi(pf, vf);
6650 	if (error != 0)
6651 		goto out;
6652 
6653 	if (nvlist_exists_binary(params, "mac-addr")) {
6654 		mac = nvlist_get_binary(params, "mac-addr", &size);
6655 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6656 
6657 		if (nvlist_get_bool(params, "allow-set-mac"))
6658 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6659 	} else
6660 		/*
6661 		 * If the administrator has not specified a MAC address then
6662 		 * we must allow the VF to choose one.
6663 		 */
6664 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6665 
6666 	if (nvlist_get_bool(params, "mac-anti-spoof"))
6667 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6668 
6669 	if (nvlist_get_bool(params, "allow-promisc"))
6670 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6671 
6672 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6673 
6674 	ixl_reset_vf(pf, vf);
6675 out:
6676 	IXL_PF_UNLOCK(pf);
6677 	if (error == 0) {
6678 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6679 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6680 	}
6681 
6682 	return (error);
6683 }
6684 #endif /* PCI_IOV */
6685