xref: /freebsd/sys/dev/ixl/if_ixl.c (revision bd81e07d2761cf1c13063eb49a5c0cb4a6951318)
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.3";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
#ifdef X722_SUPPORT
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
#endif
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static int	ixl_enable_rings(struct ixl_vsi *);
static int	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);
static void	ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void	ixl_free_mac_filters(struct ixl_vsi *vsi);


/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_update_vsi_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif

#ifdef PCI_IOV
static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void	ixl_iov_uninit(device_t dev);
static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void	ixl_handle_vf_msg(struct ixl_pf *,
		    struct i40e_arq_event_info *);
static void	ixl_handle_vflr(void *arg, int pending);

static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

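/*
** Example (illustrative only): the CTLFLAG_RDTUN tunables above are
** read at module load time, so they would normally be set from
** /boot/loader.conf, e.g.:
**
**	hw.ixl.enable_msix=1
**	hw.ixl.ringsz=4096
**	hw.ixl.max_queues=8
**	hw.ixl.dynamic_rx_itr=1
**
** The values shown are hypothetical; acceptable ranges depend on the
** adapter and on the DEFAULT_RING/IXL_ITR_* definitions in the headers.
*/
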
#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines whether the driver should be loaded for a
 *  device, based on its PCI vendor and device IDs.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");

	/* Debug shared-code message level */
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Debug Message Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
	    0, "PF/VF Virtual Channel debug level");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

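	/*
	** Note: the two checks above bracket a compatibility window
	** around the admin queue API version the driver was built
	** against (I40E_FW_API_VERSION_MAJOR/MINOR): a newer minor
	** version in the NVM draws an "update the driver" warning,
	** while anything older than MINOR - 1 draws an "update the
	** NVM" warning; versions inside the window pass silently.
	*/
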
	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error)
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial switch config failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Limit phy interrupts to link and modules failure */
	error = i40e_aq_set_phy_int_mask(hw,
	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (error)
		device_printf(dev, "set phy mask failed: %d\n", error);

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0)
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXL_PF_LOCK(pf);
		ixl_stop(pf);
		IXL_PF_UNLOCK(pf);
	}

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

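	/*
	** Strategy (a summary of the code below): start with a guess of
	** 40 capability elements; if the firmware reports the buffer was
	** too small (I40E_AQ_RC_ENOMEM), it also returns the size it
	** actually needs, so we retry exactly once with that size.
	*/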
	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

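/*
** Helper for SIOCSIFCAP: TSO requires a working TX checksum offload,
** so the two capabilities cannot be toggled independently. The
** function below walks the possible (TXCSUM, TSO) states for IPv4
** and IPv6 and enforces that dependency, using IXL_FLAGS_KEEP_TSO*
** to remember when TSO was turned off only as a side effect of
** dropping TXCSUM, so it can be restored later.
*/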
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: it is used by the stack as the
 *  init entry point in the network interface structure, and it is
 *  also used by the driver as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!\n");
			return;
		} else {
			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
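/*
** Pattern used below (a summary, not new behavior): the interrupt
** handler does the immediate RX/TX cleanup, and when more work
** remains it queues que->task on the per-queue taskqueue instead of
** looping; only when a pass completes with no leftover work is the
** queue interrupt re-enabled via ixl_enable_queue().
*/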
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
#ifndef IFM_ETH_XTYPE
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_CX;
			break;
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_CX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
#else
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
#endif
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections.
*/
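/*
** Sampling cadence (illustrative, assuming txr->atr_rate is seeded
** from the hw.ixl.atr_rate tunable): every TCP SYN and FIN is
** sampled, and otherwise roughly one packet in atr_rate (20 by
** default) per TX ring; atr_count is reset after each programmed
** filter.
*/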
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
1699 static void
1700 ixl_add_multi(struct ixl_vsi *vsi)
1701 {
1702 	struct	ifmultiaddr	*ifma;
1703 	struct ifnet		*ifp = vsi->ifp;
1704 	struct i40e_hw		*hw = vsi->hw;
1705 	int			mcnt = 0, flags;
1706 
1707 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1708 
1709 	if_maddr_rlock(ifp);
1710 	/*
1711 	** First just get a count, to decide if we
1712 	** should simply use multicast promiscuous.
1713 	*/
1714 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1715 		if (ifma->ifma_addr->sa_family != AF_LINK)
1716 			continue;
1717 		mcnt++;
1718 	}
1719 	if_maddr_runlock(ifp);
1720 
1721 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1722 		/* delete existing MC filters */
1723 		ixl_del_hw_filters(vsi, mcnt);
1724 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1725 		    vsi->seid, TRUE, NULL);
1726 		return;
1727 	}
1728 
1729 	mcnt = 0;
1730 	if_maddr_rlock(ifp);
1731 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1732 		if (ifma->ifma_addr->sa_family != AF_LINK)
1733 			continue;
1734 		ixl_add_mc_filter(vsi,
1735 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1736 		mcnt++;
1737 	}
1738 	if_maddr_runlock(ifp);
1739 	if (mcnt > 0) {
1740 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1741 		ixl_add_hw_filters(vsi, flags, mcnt);
1742 	}
1743 
1744 	IOCTL_DEBUGOUT("ixl_add_multi: end");
1745 	return;
1746 }
1747 
1748 static void
1749 ixl_del_multi(struct ixl_vsi *vsi)
1750 {
1751 	struct ifnet		*ifp = vsi->ifp;
1752 	struct ifmultiaddr	*ifma;
1753 	struct ixl_mac_filter	*f;
1754 	int			mcnt = 0;
1755 	bool		match = FALSE;
1756 
1757 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1758 
1759 	/* Search for removed multicast addresses */
1760 	if_maddr_rlock(ifp);
1761 	SLIST_FOREACH(f, &vsi->ftl, next) {
1762 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1763 			match = FALSE;
1764 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1765 				if (ifma->ifma_addr->sa_family != AF_LINK)
1766 					continue;
1767 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1768 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1769 					match = TRUE;
1770 					break;
1771 				}
1772 			}
1773 			if (match == FALSE) {
1774 				f->flags |= IXL_FILTER_DEL;
1775 				mcnt++;
1776 			}
1777 		}
1778 	}
1779 	if_maddr_runlock(ifp);
1780 
1781 	if (mcnt > 0)
1782 		ixl_del_hw_filters(vsi, mcnt);
1783 }
1784 
1785 
1786 /*********************************************************************
1787  *  Timer routine
1788  *
1789  *  This routine checks for link status, updates statistics,
1790  *  and runs the watchdog check.
1791  *
1792  **********************************************************************/
1793 
1794 static void
1795 ixl_local_timer(void *arg)
1796 {
1797 	struct ixl_pf		*pf = arg;
1798 	struct i40e_hw		*hw = &pf->hw;
1799 	struct ixl_vsi		*vsi = &pf->vsi;
1800 	struct ixl_queue	*que = vsi->queues;
1801 	device_t		dev = pf->dev;
1802 	int			hung = 0;
1803 	u32			mask;
1804 
1805 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1806 
1807 	/* Fire off the adminq task */
1808 	taskqueue_enqueue(pf->tq, &pf->adminq);
1809 
1810 	/* Update stats */
1811 	ixl_update_stats_counters(pf);
1812 
1813 	/*
1814 	** Check status of the queues
1815 	*/
1816 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1817 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
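	/*
	** Writing INTENA | SWINT_TRIG to a queue's DYN_CTLN
	** register fires a software-triggered interrupt, so a
	** busy queue is serviced even without a new HW event.
	*/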
1818 
1819 	for (int i = 0; i < vsi->num_queues; i++, que++) {
1820 		/* Any queues with outstanding work get a sw irq */
1821 		if (que->busy)
1822 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1823 		/*
1824 		** Each time txeof runs without cleaning while
1825 		** uncleaned descriptors remain, it increments busy;
1826 		** once busy reaches IXL_MAX_TX_BUSY we declare it hung.
1827 		*/
1828 		if (que->busy == IXL_QUEUE_HUNG) {
1829 			++hung;
1830 			/* Mark the queue as inactive */
1831 			vsi->active_queues &= ~((u64)1 << que->me);
1832 			continue;
1833 		} else {
1834 			/* Check if we've come back from hung */
1835 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1836 				vsi->active_queues |= ((u64)1 << que->me);
1837 		}
1838 		if (que->busy >= IXL_MAX_TX_BUSY) {
1839 #ifdef IXL_DEBUG
1840 			device_printf(dev, "Warning: queue %d "
1841 			    "appears to be hung!\n", i);
1842 #endif
1843 			que->busy = IXL_QUEUE_HUNG;
1844 			++hung;
1845 		}
1846 	}
1847 	/* Only reinit if all queues show hung */
1848 	if (hung == vsi->num_queues)
1849 		goto hung;
1850 
1851 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1852 	return;
1853 
1854 hung:
1855 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1856 	ixl_init_locked(pf);
1857 }
1858 
1859 /*
1860 ** Note: this routine updates the OS on the link state
1861 **	the real check of the hardware only happens with
1862 **	a link interrupt.
1863 */
1864 static void
1865 ixl_update_link_status(struct ixl_pf *pf)
1866 {
1867 	struct ixl_vsi		*vsi = &pf->vsi;
1868 	struct i40e_hw		*hw = &pf->hw;
1869 	struct ifnet		*ifp = vsi->ifp;
1870 	device_t		dev = pf->dev;
1871 
1872 	if (pf->link_up) {
1873 		if (vsi->link_active == FALSE) {
1874 			pf->fc = hw->fc.current_mode;
1875 			if (bootverbose) {
1876 				device_printf(dev, "Link is up %d Gbps %s,"
1877 				    " Flow Control: %s\n",
1878 				    ((pf->link_speed ==
1879 				    I40E_LINK_SPEED_40GB)? 40:10),
1880 				    "Full Duplex", ixl_fc_string[pf->fc]);
1881 			}
1882 			vsi->link_active = TRUE;
1883 			/*
1884 			** Warn user if link speed on NPAR enabled
1885 			** partition is not at least 10GB
1886 			*/
1887 			if (hw->func_caps.npar_enable &&
1888 			   (hw->phy.link_info.link_speed ==
1889 			   I40E_LINK_SPEED_1GB ||
1890 			   hw->phy.link_info.link_speed ==
1891 			   I40E_LINK_SPEED_100MB))
1892 				device_printf(dev, "The partition detected a "
1893 				    "link speed that is less than 10Gbps\n");
1894 			if_link_state_change(ifp, LINK_STATE_UP);
1895 		}
1896 	} else { /* Link down */
1897 		if (vsi->link_active == TRUE) {
1898 			if (bootverbose)
1899 				device_printf(dev, "Link is Down\n");
1900 			if_link_state_change(ifp, LINK_STATE_DOWN);
1901 			vsi->link_active = FALSE;
1902 		}
1903 	}
1904 
1905 	return;
1906 }
1907 
1908 /*********************************************************************
1909  *
1910  *  This routine stops all traffic on the adapter by disabling
1911  *  interrupts and the RX/TX rings, and halts the local timer.
1912  *
1913  **********************************************************************/
1914 
1915 static void
1916 ixl_stop(struct ixl_pf *pf)
1917 {
1918 	struct ixl_vsi	*vsi = &pf->vsi;
1919 	struct ifnet	*ifp = vsi->ifp;
1920 
1921 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1922 
1923 	INIT_DEBUGOUT("ixl_stop: begin\n");
1924 	if (pf->num_vfs == 0)
1925 		ixl_disable_intr(vsi);
1926 	else
1927 		ixl_disable_rings_intr(vsi);
1928 	ixl_disable_rings(vsi);
1929 
1930 	/* Tell the stack that the interface is no longer active */
1931 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1932 
1933 	/* Stop the local timer */
1934 	callout_stop(&pf->timer);
1935 
1936 	return;
1937 }
1938 
1939 
1940 /*********************************************************************
1941  *
1942  *  Setup Legacy or MSI interrupt resources and the handler for the VSI
1943  *
1944  **********************************************************************/
1945 static int
1946 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1947 {
1948 	device_t        dev = pf->dev;
1949 	struct 		ixl_vsi *vsi = &pf->vsi;
1950 	struct		ixl_queue *que = vsi->queues;
1951 	int 		error, rid = 0;
1952 
1953 	if (pf->msix == 1)
1954 		rid = 1;
1955 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1956 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1957 	if (pf->res == NULL) {
1958 		device_printf(dev, "Unable to allocate"
1959 		    " bus resource: vsi legacy/msi interrupt\n");
1960 		return (ENXIO);
1961 	}
1962 
1963 	/* Set the handler function */
1964 	error = bus_setup_intr(dev, pf->res,
1965 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1966 	    ixl_intr, pf, &pf->tag);
1967 	if (error) {
1968 		pf->res = NULL;
1969 		device_printf(dev, "Failed to register legacy/msi handler\n");
1970 		return (error);
1971 	}
1972 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1973 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1974 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1975 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1976 	    taskqueue_thread_enqueue, &que->tq);
1977 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1978 	    device_get_nameunit(dev));
1979 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1980 
1981 #ifdef PCI_IOV
1982 	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1983 #endif
1984 
1985 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1986 	    taskqueue_thread_enqueue, &pf->tq);
1987 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1988 	    device_get_nameunit(dev));
1989 
1990 	return (0);
1991 }
1992 
1993 
1994 /*********************************************************************
1995  *
1996  *  Setup MSIX Interrupt resources and handlers for the VSI
1997  *
1998  **********************************************************************/
1999 static int
2000 ixl_assign_vsi_msix(struct ixl_pf *pf)
2001 {
2002 	device_t	dev = pf->dev;
2003 	struct 		ixl_vsi *vsi = &pf->vsi;
2004 	struct 		ixl_queue *que = vsi->queues;
2005 	struct		tx_ring	 *txr;
2006 	int 		error, rid, vector = 0;
2007 #ifdef	RSS
2008 	cpuset_t cpu_mask;
2009 #endif
2010 
2011 	/* Admin Queue is vector 0 */
2012 	rid = vector + 1;
2013 	pf->res = bus_alloc_resource_any(dev,
2014 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2015 	if (!pf->res) {
2016 		device_printf(dev, "Unable to allocate"
2017 		    " bus resource: Adminq interrupt [%d]\n", rid);
2018 		return (ENXIO);
2019 	}
2020 	/* Set the adminq vector and handler */
2021 	error = bus_setup_intr(dev, pf->res,
2022 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2023 	    ixl_msix_adminq, pf, &pf->tag);
2024 	if (error) {
2025 		pf->res = NULL;
2026 		device_printf(dev, "Failed to register Admin Queue handler\n");
2027 		return (error);
2028 	}
2029 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2030 	pf->admvec = vector;
2031 	/* Tasklet for Admin Queue */
2032 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2033 
2034 #ifdef PCI_IOV
2035 	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2036 #endif
2037 
2038 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2039 	    taskqueue_thread_enqueue, &pf->tq);
2040 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2041 	    device_get_nameunit(pf->dev));
2042 	++vector;
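	/*
	** Vector 0 is the admin queue; vectors 1..num_queues each
	** serve one RX/TX queue pair. SYS_RES_IRQ rids for MSIX
	** are 1-based, hence rid = vector + 1 below.
	*/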
2043 
2044 	/* Now set up the stations */
2045 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2046 		int cpu_id = i;
2047 		rid = vector + 1;
2048 		txr = &que->txr;
2049 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2050 		    RF_SHAREABLE | RF_ACTIVE);
2051 		if (que->res == NULL) {
2052 			device_printf(dev, "Unable to allocate"
2053 			    " bus resource: que interrupt [%d]\n", vector);
2054 			return (ENXIO);
2055 		}
2056 		/* Set the handler function */
2057 		error = bus_setup_intr(dev, que->res,
2058 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2059 		    ixl_msix_que, que, &que->tag);
2060 		if (error) {
2061 			que->res = NULL;
2062 			device_printf(dev, "Failed to register queue handler\n");
2063 			return (error);
2064 		}
2065 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2066 		/* Bind the vector to a CPU */
2067 #ifdef RSS
2068 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2069 #endif
2070 		bus_bind_intr(dev, que->res, cpu_id);
2071 		que->msix = vector;
2072 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2073 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2074 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2075 		    taskqueue_thread_enqueue, &que->tq);
2076 #ifdef RSS
2077 		CPU_SETOF(cpu_id, &cpu_mask);
2078 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2079 		    &cpu_mask, "%s (bucket %d)",
2080 		    device_get_nameunit(dev), cpu_id);
2081 #else
2082 		taskqueue_start_threads(&que->tq, 1, PI_NET,
2083 		    "%s que", device_get_nameunit(dev));
2084 #endif
2085 	}
2086 
2087 	return (0);
2088 }
2089 
2090 
2091 /*
2092  * Allocate MSI/X vectors
2093  */
2094 static int
2095 ixl_init_msix(struct ixl_pf *pf)
2096 {
2097 	device_t dev = pf->dev;
2098 	int rid, want, vectors, queues, available;
2099 
2100 	/* Override by tunable */
2101 	if (ixl_enable_msix == 0)
2102 		goto msi;
2103 
2104 	/*
2105 	** When used in a virtualized environment
2106 	** the PCI BUSMASTER capability may not be set,
2107 	** so explicitly set it here and rewrite
2108 	** the ENABLE bit in the MSIX control register
2109 	** at this point to allow the host to
2110 	** successfully initialize us.
2111 	*/
2112 	{
2113 		u16 pci_cmd_word;
2114 		int msix_ctrl;
2115 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2116 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2117 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2118 		pci_find_cap(dev, PCIY_MSIX, &rid);
2119 		rid += PCIR_MSIX_CTRL;
2120 		msix_ctrl = pci_read_config(dev, rid, 2);
2121 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2122 		pci_write_config(dev, rid, msix_ctrl, 2);
2123 	}
2124 
2125 	/* First try MSI/X */
2126 	rid = PCIR_BAR(IXL_BAR);
2127 	pf->msix_mem = bus_alloc_resource_any(dev,
2128 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2129 	if (!pf->msix_mem) {
2130 		/* May not be enabled */
2131 		device_printf(pf->dev,
2132 		    "Unable to map MSIX table\n");
2133 		goto msi;
2134 	}
2135 
2136 	available = pci_msix_count(dev);
2137 	if (available == 0) { /* system has msix disabled */
2138 		bus_release_resource(dev, SYS_RES_MEMORY,
2139 		    rid, pf->msix_mem);
2140 		pf->msix_mem = NULL;
2141 		goto msi;
2142 	}
2143 
2144 	/* Figure out a reasonable auto config value */
2145 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2146 
2147 	/* Override with hardcoded value if sane */
2148 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2149 		queues = ixl_max_queues;
2150 
2151 #ifdef  RSS
2152 	/* If we're doing RSS, clamp at the number of RSS buckets */
2153 	if (queues > rss_getnumbuckets())
2154 		queues = rss_getnumbuckets();
2155 #endif
2156 
2157 	/*
2158 	** Want one vector (RX/TX pair) per queue
2159 	** plus an additional one for the admin queue.
2160 	*/
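	/*
	** e.g. an 8-core system reporting 16 available vectors
	** yields queues = 8 and want = 9, leaving 7 unused.
	*/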
2161 	want = queues + 1;
2162 	if (want <= available) {	/* Have enough */
2163 		vectors = want;
2164 	} else {
2165 		device_printf(pf->dev,
2166 		    "MSIX Configuration Problem, "
2167 		    "%d vectors available but %d wanted!\n",
2168 		    available, want);
2169 		return (0); /* Will go to Legacy setup */
2170 	}
2171 
2172 	if (pci_alloc_msix(dev, &vectors) == 0) {
2173 		device_printf(pf->dev,
2174 		    "Using MSIX interrupts with %d vectors\n", vectors);
2175 		pf->msix = vectors;
2176 		pf->vsi.num_queues = queues;
2177 #ifdef RSS
2178 		/*
2179 		 * If we're doing RSS, the number of queues needs to
2180 		 * match the number of RSS buckets that are configured.
2181 		 *
2182 		 * + If there's more queues than RSS buckets, we'll end
2183 		 *   up with queues that get no traffic.
2184 		 *
2185 		 * + If there's more RSS buckets than queues, we'll end
2186 		 *   up having multiple RSS buckets map to the same queue,
2187 		 *   so there'll be some contention.
2188 		 */
2189 		if (queues != rss_getnumbuckets()) {
2190 			device_printf(dev,
2191 			    "%s: queues (%d) != RSS buckets (%d)"
2192 			    "; performance will be impacted.\n",
2193 			    __func__, queues, rss_getnumbuckets());
2194 		}
2195 #endif
2196 		return (vectors);
2197 	}
2198 msi:
2199 	vectors = pci_msi_count(dev);
2200 	pf->vsi.num_queues = 1;
2201 	pf->msix = 1;
2202 	ixl_max_queues = 1;
2203 	ixl_enable_msix = 0;
2204 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
2205 		device_printf(pf->dev, "Using an MSI interrupt\n");
2206 	} else {
2207 		pf->msix = 0;
2208 		device_printf(pf->dev, "Using a Legacy interrupt\n");
2209 	}
2210 	return (vectors);
2211 }
2212 
2213 
2214 /*
2215  * Plumb MSI/X vectors
2216  */
2217 static void
2218 ixl_configure_msix(struct ixl_pf *pf)
2219 {
2220 	struct i40e_hw	*hw = &pf->hw;
2221 	struct ixl_vsi *vsi = &pf->vsi;
2222 	u32		reg;
2223 	u16		vector = 1;
2224 
2225 	/* First set up the adminq - vector 0 */
2226 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2227 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2228 
2229 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2230 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2231 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2232 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2233 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2234 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2235 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2236 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2237 
2238 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
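	/* 0x7FF is the queue-list EOL value: no queue causes on vector 0 */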
2239 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2240 
2241 	wr32(hw, I40E_PFINT_DYN_CTL0,
2242 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2243 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2244 
2245 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2246 
2247 	/* Next configure the queues */
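	/*
	** Each queue's interrupt cause list: LNKLSTN(i) starts at
	** rx(i), rx(i) chains to tx(i), and tx(i) chains to
	** rx(i+1); the final tx entry is marked IXL_QUEUE_EOL
	** to terminate the list.
	*/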
2248 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2249 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2250 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2251 
2252 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2253 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2254 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2255 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2256 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2257 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2258 
2259 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2260 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2261 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2262 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2263 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2264 		if (i == (vsi->num_queues - 1))
2265 			reg |= (IXL_QUEUE_EOL
2266 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2267 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2268 	}
2269 }
2270 
2271 /*
2272  * Configure for MSI single vector operation
2273  */
2274 static void
2275 ixl_configure_legacy(struct ixl_pf *pf)
2276 {
2277 	struct i40e_hw	*hw = &pf->hw;
2278 	u32		reg;
2279 
2280 
2281 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2282 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2283 
2284 
2285 	/* Setup "other" causes */
2286 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2287 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2288 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2289 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2290 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2291 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2292 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2293 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2294 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2295 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2296 	    ;
2297 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2298 
2299 	/* SW_ITR_IDX = 0, but don't change INTENA */
2300 	wr32(hw, I40E_PFINT_DYN_CTL0,
2301 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2302 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2303 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2304 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2305 
2306 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2307 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2308 
2309 	/* Associate the queue pair to the vector and enable the q int */
2310 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2311 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2312 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2313 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2314 
2315 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2316 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2317 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2318 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2319 
2320 	/* Next enable the queue pair */
2321 	reg = rd32(hw, I40E_QTX_ENA(0));
2322 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2323 	wr32(hw, I40E_QTX_ENA(0), reg);
2324 
2325 	reg = rd32(hw, I40E_QRX_ENA(0));
2326 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2327 	wr32(hw, I40E_QRX_ENA(0), reg);
2328 }
2329 
2330 
2331 /*
2332  * Set the Initial ITR state
2333  */
2334 static void
2335 ixl_configure_itr(struct ixl_pf *pf)
2336 {
2337 	struct i40e_hw		*hw = &pf->hw;
2338 	struct ixl_vsi		*vsi = &pf->vsi;
2339 	struct ixl_queue	*que = vsi->queues;
2340 
2341 	vsi->rx_itr_setting = ixl_rx_itr;
2342 	if (ixl_dynamic_rx_itr)
2343 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2344 	vsi->tx_itr_setting = ixl_tx_itr;
2345 	if (ixl_dynamic_tx_itr)
2346 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2347 
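	/*
	** ITR register values are in 2 usec units: e.g. a value
	** of 0x3E (62) spaces interrupts ~124 usec apart, about
	** 8K interrupts per second.
	*/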
2348 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2349 		struct tx_ring	*txr = &que->txr;
2350 		struct rx_ring 	*rxr = &que->rxr;
2351 
2352 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2353 		    vsi->rx_itr_setting);
2354 		rxr->itr = vsi->rx_itr_setting;
2355 		rxr->latency = IXL_AVE_LATENCY;
2356 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2357 		    vsi->tx_itr_setting);
2358 		txr->itr = vsi->tx_itr_setting;
2359 		txr->latency = IXL_AVE_LATENCY;
2360 	}
2361 }
2362 
2363 
2364 static int
2365 ixl_allocate_pci_resources(struct ixl_pf *pf)
2366 {
2367 	int             rid;
2368 	device_t        dev = pf->dev;
2369 
2370 	rid = PCIR_BAR(0);
2371 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2372 	    &rid, RF_ACTIVE);
2373 
2374 	if (!(pf->pci_mem)) {
2375 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2376 		return (ENXIO);
2377 	}
2378 
2379 	pf->osdep.mem_bus_space_tag =
2380 		rman_get_bustag(pf->pci_mem);
2381 	pf->osdep.mem_bus_space_handle =
2382 		rman_get_bushandle(pf->pci_mem);
2383 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2384 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2385 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
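	/*
	** hw_addr stores a pointer to the bus-space handle rather
	** than a direct mapping; register access goes through the
	** bus_space wrappers in the osdep layer.
	*/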
2386 
2387 	pf->hw.back = &pf->osdep;
2388 
2389 	/*
2390 	** Now set up MSI or MSI-X; this should
2391 	** return the number of vectors obtained,
2392 	** which will be 1 for MSI.
2393 	*/
2394 	pf->msix = ixl_init_msix(pf);
2395 	return (0);
2396 }
2397 
2398 static void
2399 ixl_free_pci_resources(struct ixl_pf * pf)
2400 {
2401 	struct ixl_vsi		*vsi = &pf->vsi;
2402 	struct ixl_queue	*que = vsi->queues;
2403 	device_t		dev = pf->dev;
2404 	int			rid, memrid;
2405 
2406 	memrid = PCIR_BAR(IXL_BAR);
2407 
2408 	/* We may get here before stations are setup */
2409 	if ((!ixl_enable_msix) || (que == NULL))
2410 		goto early;
2411 
2412 	/*
2413 	**  Release all msix VSI resources:
2414 	*/
2415 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2416 		rid = que->msix + 1;
2417 		if (que->tag != NULL) {
2418 			bus_teardown_intr(dev, que->res, que->tag);
2419 			que->tag = NULL;
2420 		}
2421 		if (que->res != NULL)
2422 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2423 	}
2424 
2425 early:
2426 	/* Clean the AdminQ interrupt last */
2427 	if (pf->admvec) /* we are doing MSIX */
2428 		rid = pf->admvec + 1;
2429 	else
2430 		rid = (pf->msix != 0) ? 1 : 0;
2431 
2432 	if (pf->tag != NULL) {
2433 		bus_teardown_intr(dev, pf->res, pf->tag);
2434 		pf->tag = NULL;
2435 	}
2436 	if (pf->res != NULL)
2437 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2438 
2439 	if (pf->msix)
2440 		pci_release_msi(dev);
2441 
2442 	if (pf->msix_mem != NULL)
2443 		bus_release_resource(dev, SYS_RES_MEMORY,
2444 		    memrid, pf->msix_mem);
2445 
2446 	if (pf->pci_mem != NULL)
2447 		bus_release_resource(dev, SYS_RES_MEMORY,
2448 		    PCIR_BAR(0), pf->pci_mem);
2449 
2450 	return;
2451 }
2452 
2453 static void
2454 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2455 {
2456 	/* Add supported media types to the media list */
2457 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2458 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2459 
2460 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2461 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2462 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2463 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2464 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2465 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2466 
2467 	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2468 	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2469 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2470 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2471 
2472 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2473 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2474 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2475 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2476 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2477 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2478 
2479 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2480 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2481 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2482 	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2483 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2484 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2485 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2486 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2487 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2488 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2489 
2490 #ifndef IFM_ETH_XTYPE
2491 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2492 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2493 
2494 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2495 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2496 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2497 	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2498 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2499 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2500 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2501 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2502 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2503 
2504 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2505 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2506 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2507 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2508 #else
2509 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2510 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2511 
2512 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2513 	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2514 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2515 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2516 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2517 	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2518 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2519 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2520 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2521 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2522 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2523 
2524 	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2525 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2526 
2527 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2528 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2529 	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2530 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2531 #endif
2532 }
2533 
2534 /*********************************************************************
2535  *
2536  *  Setup networking device structure and register an interface.
2537  *
2538  **********************************************************************/
2539 static int
2540 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2541 {
2542 	struct ifnet		*ifp;
2543 	struct i40e_hw		*hw = vsi->hw;
2544 	struct ixl_queue	*que = vsi->queues;
2545 	struct i40e_aq_get_phy_abilities_resp abilities;
2546 	enum i40e_status_code aq_error = 0;
2547 
2548 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2549 
2550 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2551 	if (ifp == NULL) {
2552 		device_printf(dev, "can not allocate ifnet structure\n");
2553 		return (-1);
2554 	}
2555 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2556 	ifp->if_mtu = ETHERMTU;
2557 	ifp->if_baudrate = IF_Gbps(40);
2558 	ifp->if_init = ixl_init;
2559 	ifp->if_softc = vsi;
2560 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2561 	ifp->if_ioctl = ixl_ioctl;
2562 
2563 #if __FreeBSD_version >= 1100036
2564 	if_setgetcounterfn(ifp, ixl_get_counter);
2565 #endif
2566 
2567 	ifp->if_transmit = ixl_mq_start;
2568 
2569 	ifp->if_qflush = ixl_qflush;
2570 
2571 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2572 
2573 	vsi->max_frame_size =
2574 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2575 	    + ETHER_VLAN_ENCAP_LEN;
2576 
2577 	/*
2578 	 * Tell the upper layer(s) we support long frames.
2579 	 */
2580 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2581 
2582 	ifp->if_capabilities |= IFCAP_HWCSUM;
2583 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2584 	ifp->if_capabilities |= IFCAP_TSO;
2585 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2586 	ifp->if_capabilities |= IFCAP_LRO;
2587 
2588 	/* VLAN capabilities */
2589 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2590 			     |  IFCAP_VLAN_HWTSO
2591 			     |  IFCAP_VLAN_MTU
2592 			     |  IFCAP_VLAN_HWCSUM;
2593 	ifp->if_capenable = ifp->if_capabilities;
2594 
2595 	/*
2596 	** Don't turn this on by default: if vlans are
2597 	** created on another pseudo device (e.g. lagg),
2598 	** vlan events are not passed through, which breaks
2599 	** operation; with HW FILTER off it works. If you
2600 	** use vlans directly on the ixl driver you can
2601 	** enable this to get full hardware tag filtering.
2602 	*/
2603 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2604 
2605 	/*
2606 	 * Specify the media types supported by this adapter and register
2607 	 * callbacks to update media and link information
2608 	 */
2609 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2610 		     ixl_media_status);
2611 
2612 	aq_error = i40e_aq_get_phy_capabilities(hw,
2613 	    FALSE, TRUE, &abilities, NULL);
2614 	/* May need delay to detect fiber correctly */
2615 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2616 		i40e_msec_delay(200);
2617 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2618 		    TRUE, &abilities, NULL);
2619 	}
2620 	if (aq_error) {
2621 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2622 			device_printf(dev, "Unknown PHY type detected!\n");
2623 		else
2624 			device_printf(dev,
2625 			    "Error getting supported media types, err %d,"
2626 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2627 		return (0);
2628 	}
2629 
2630 	ixl_add_ifmedia(vsi, abilities.phy_type);
2631 
2632 	/* Use autoselect media by default */
2633 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2634 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2635 
2636 	ether_ifattach(ifp, hw->mac.addr);
2637 
2638 	return (0);
2639 }
2640 
2641 /*
2642 ** Run when the Admin Queue gets a
2643 ** link transition interrupt.
2644 */
2645 static void
2646 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2647 {
2648 	struct i40e_hw	*hw = &pf->hw;
2649 	struct i40e_aqc_get_link_status *status =
2650 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2651 	bool check;
2652 
2653 	hw->phy.get_link_info = TRUE;
2654 	i40e_get_link_status(hw, &check);
2655 	pf->link_up = check;
2656 #ifdef IXL_DEBUG
2657 	printf("Link is %s\n", check ? "up":"down");
2658 #endif
2659 	/* Report if Unqualified modules are found */
2660 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2661 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2662 	    (!(status->link_info & I40E_AQ_LINK_UP)))
2663 		device_printf(pf->dev, "Link failed because "
2664 		    "an unqualified module was detected\n");
2665 
2666 	return;
2667 }
2668 
2669 /*********************************************************************
2670  *
2671  *  Get Firmware Switch configuration
2672  *	- this will need to be more robust when more complex
2673  *	  switch configurations are enabled.
2674  *
2675  **********************************************************************/
2676 static int
2677 ixl_switch_config(struct ixl_pf *pf)
2678 {
2679 	struct i40e_hw	*hw = &pf->hw;
2680 	struct ixl_vsi	*vsi = &pf->vsi;
2681 	device_t 	dev = vsi->dev;
2682 	struct i40e_aqc_get_switch_config_resp *sw_config;
2683 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2684 	int	ret;
2685 	u16	next = 0;
2686 
2687 	memset(&aq_buf, 0, sizeof(aq_buf));
2688 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2689 	ret = i40e_aq_get_switch_config(hw, sw_config,
2690 	    sizeof(aq_buf), &next, NULL);
2691 		device_printf(dev, "aq_get_switch_config failed (ret=%d)!!\n",
2692 		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2693 		    ret);
2694 		return (ret);
2695 	}
2696 #ifdef IXL_DEBUG
2697 	device_printf(dev,
2698 	    "Switch config: header reported: %d in structure, %d total\n",
2699 	    sw_config->header.num_reported, sw_config->header.num_total);
2700 	for (int i = 0; i < sw_config->header.num_reported; i++) {
2701 		device_printf(dev,
2702 		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2703 		    sw_config->element[i].element_type,
2704 		    sw_config->element[i].seid,
2705 		    sw_config->element[i].uplink_seid,
2706 		    sw_config->element[i].downlink_seid);
2707 	}
2708 #endif
2709 	/* Simplified due to a single VSI at the moment */
2710 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2711 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2712 	vsi->seid = sw_config->element[0].seid;
2713 	return (ret);
2714 }
2715 
2716 /*********************************************************************
2717  *
2718  *  Initialize the VSI:  this handles contexts, which means things
2719  *  			 like the number of descriptors, buffer size,
2720  *			 plus we init the rings thru this function.
2721  *
2722  **********************************************************************/
2723 static int
2724 ixl_initialize_vsi(struct ixl_vsi *vsi)
2725 {
2726 	struct ixl_pf		*pf = vsi->back;
2727 	struct ixl_queue	*que = vsi->queues;
2728 	device_t		dev = vsi->dev;
2729 	struct i40e_hw		*hw = vsi->hw;
2730 	struct i40e_vsi_context	ctxt;
2731 	int			err = 0;
2732 
2733 	memset(&ctxt, 0, sizeof(ctxt));
2734 	ctxt.seid = vsi->seid;
2735 	if (pf->veb_seid != 0)
2736 		ctxt.uplink_seid = pf->veb_seid;
2737 	ctxt.pf_num = hw->pf_id;
2738 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2739 	if (err) {
2740 		device_printf(dev, "get vsi params failed %x!!\n", err);
2741 		return (err);
2742 	}
2743 #ifdef IXL_DEBUG
2744 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2745 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2746 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2747 	    ctxt.uplink_seid, ctxt.vsi_number,
2748 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2749 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2750 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2751 #endif
2752 	/*
2753 	** Set the queue and traffic class bits
2754 	**  - when multiple traffic classes are supported
2755 	**    this will need to be more robust.
2756 	*/
2757 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2758 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2759 	ctxt.info.queue_mapping[0] = 0;
2760 	ctxt.info.tc_mapping[0] = 0x0800;
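	/*
	** 0x0800: queue offset 0 and, assuming the standard AQ
	** TC-mapping layout (log2 of the queue count in the field
	** at bit 9), 2^4 = 16 queues advertised for TC0.
	*/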
2761 
2762 	/* Set VLAN receive stripping mode */
2763 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2764 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2765 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2766 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2767 	else
2768 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2769 
2770 	/* Keep copy of VSI info in VSI for statistic counters */
2771 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2772 
2773 	/* Reset VSI statistics */
2774 	ixl_vsi_reset_stats(vsi);
2775 	vsi->hw_filters_add = 0;
2776 	vsi->hw_filters_del = 0;
2777 
2778 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2779 
2780 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2781 	if (err) {
2782 		device_printf(dev, "update vsi params failed %x!!\n",
2783 		   hw->aq.asq_last_status);
2784 		return (err);
2785 	}
2786 
2787 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2788 		struct tx_ring		*txr = &que->txr;
2789 		struct rx_ring 		*rxr = &que->rxr;
2790 		struct i40e_hmc_obj_txq tctx;
2791 		struct i40e_hmc_obj_rxq rctx;
2792 		u32			txctl;
2793 		u16			size;
2794 
2795 
2796 		/* Setup the HMC TX Context  */
2797 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2798 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2799 		tctx.new_context = 1;
2800 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2801 		tctx.qlen = que->num_desc;
2802 		tctx.fc_ena = 0;
2803 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2804 		/* Enable HEAD writeback */
2805 		tctx.head_wb_ena = 1;
2806 		tctx.head_wb_addr = txr->dma.pa +
2807 		    (que->num_desc * sizeof(struct i40e_tx_desc));
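		/*
		** With head writeback the HW DMAs its head index
		** into the word just past the ring, so txeof can
		** poll memory instead of reading a register.
		*/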
2808 		tctx.rdylist_act = 0;
2809 		err = i40e_clear_lan_tx_queue_context(hw, i);
2810 		if (err) {
2811 			device_printf(dev, "Unable to clear TX context\n");
2812 			break;
2813 		}
2814 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2815 		if (err) {
2816 			device_printf(dev, "Unable to set TX context\n");
2817 			break;
2818 		}
2819 		/* Associate the ring with this PF */
2820 		txctl = I40E_QTX_CTL_PF_QUEUE;
2821 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2822 		    I40E_QTX_CTL_PF_INDX_MASK);
2823 		wr32(hw, I40E_QTX_CTL(i), txctl);
2824 		ixl_flush(hw);
2825 
2826 		/* Do ring (re)init */
2827 		ixl_init_tx_ring(que);
2828 
2829 		/* Next setup the HMC RX Context  */
2830 		if (vsi->max_frame_size <= MCLBYTES)
2831 			rxr->mbuf_sz = MCLBYTES;
2832 		else
2833 			rxr->mbuf_sz = MJUMPAGESIZE;
2834 
2835 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2836 
2837 		/* Set up an RX context for the HMC */
2838 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2839 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2840 		/* ignore header split for now */
2841 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2842 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2843 		    vsi->max_frame_size : max_rxmax;
2844 		rctx.dtype = 0;
2845 		rctx.dsize = 1;	/* use 32-byte descriptors */
2846 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2847 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2848 		rctx.qlen = que->num_desc;
2849 		rctx.tphrdesc_ena = 1;
2850 		rctx.tphwdesc_ena = 1;
2851 		rctx.tphdata_ena = 0;
2852 		rctx.tphhead_ena = 0;
2853 		rctx.lrxqthresh = 2;
2854 		rctx.crcstrip = 1;
2855 		rctx.l2tsel = 1;
2856 		rctx.showiv = 1;
2857 		rctx.fc_ena = 0;
2858 		rctx.prefena = 1;
2859 
2860 		err = i40e_clear_lan_rx_queue_context(hw, i);
2861 		if (err) {
2862 			device_printf(dev,
2863 			    "Unable to clear RX context %d\n", i);
2864 			break;
2865 		}
2866 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2867 		if (err) {
2868 			device_printf(dev, "Unable to set RX context %d\n", i);
2869 			break;
2870 		}
2871 		err = ixl_init_rx_ring(que);
2872 		if (err) {
2873 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2874 			break;
2875 		}
2876 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2877 #ifdef DEV_NETMAP
2878 		/* preserve queue */
2879 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2880 			struct netmap_adapter *na = NA(vsi->ifp);
2881 			struct netmap_kring *kring = &na->rx_rings[i];
2882 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2883 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2884 		} else
2885 #endif /* DEV_NETMAP */
2886 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2887 	}
2888 	return (err);
2889 }
2890 
2891 
2892 /*********************************************************************
2893  *
2894  *  Free all VSI structs.
2895  *
2896  **********************************************************************/
2897 void
2898 ixl_free_vsi(struct ixl_vsi *vsi)
2899 {
2900 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2901 	struct ixl_queue	*que = vsi->queues;
2902 
2903 	/* Free station queues */
2904 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2905 		struct tx_ring *txr = &que->txr;
2906 		struct rx_ring *rxr = &que->rxr;
2907 
2908 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2909 			continue;
2910 		IXL_TX_LOCK(txr);
2911 		ixl_free_que_tx(que);
2912 		if (txr->base)
2913 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2914 		IXL_TX_UNLOCK(txr);
2915 		IXL_TX_LOCK_DESTROY(txr);
2916 
2917 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2918 			continue;
2919 		IXL_RX_LOCK(rxr);
2920 		ixl_free_que_rx(que);
2921 		if (rxr->base)
2922 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2923 		IXL_RX_UNLOCK(rxr);
2924 		IXL_RX_LOCK_DESTROY(rxr);
2925 
2926 	}
2927 	free(vsi->queues, M_DEVBUF);
2928 
2929 	/* Free VSI filter list */
2930 	ixl_free_mac_filters(vsi);
2931 }
2932 
2933 static void
2934 ixl_free_mac_filters(struct ixl_vsi *vsi)
2935 {
2936 	struct ixl_mac_filter *f;
2937 
2938 	while (!SLIST_EMPTY(&vsi->ftl)) {
2939 		f = SLIST_FIRST(&vsi->ftl);
2940 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2941 		free(f, M_DEVBUF);
2942 	}
2943 }
2944 
2945 
2946 /*********************************************************************
2947  *
2948  *  Allocate memory for the VSI (virtual station interface) and their
2949  *  associated queues, rings and the descriptors associated with each,
2950  *  called only once at attach.
2951  *
2952  **********************************************************************/
2953 static int
2954 ixl_setup_stations(struct ixl_pf *pf)
2955 {
2956 	device_t		dev = pf->dev;
2957 	struct ixl_vsi		*vsi;
2958 	struct ixl_queue	*que;
2959 	struct tx_ring		*txr;
2960 	struct rx_ring		*rxr;
2961 	int 			rsize, tsize;
2962 	int			error = I40E_SUCCESS;
2963 
2964 	vsi = &pf->vsi;
2965 	vsi->back = pf;
2966 	vsi->hw = &pf->hw;
2967 	vsi->id = 0;
2968 	vsi->num_vlans = 0;
2970 
2971 	/* Get memory for the station queues */
2972 	if (!(vsi->queues =
2973 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2974 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2975 		device_printf(dev, "Unable to allocate queue memory\n");
2976 		error = ENOMEM;
2977 		goto early;
2978 	}
2979 
2980 	for (int i = 0; i < vsi->num_queues; i++) {
2981 		que = &vsi->queues[i];
2982 		que->num_desc = ixl_ringsz;
2983 		que->me = i;
2984 		que->vsi = vsi;
2985 		/* mark the queue as active */
2986 		vsi->active_queues |= (u64)1 << que->me;
2987 		txr = &que->txr;
2988 		txr->que = que;
2989 		txr->tail = I40E_QTX_TAIL(que->me);
2990 
2991 		/* Initialize the TX lock */
2992 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2993 		    device_get_nameunit(dev), que->me);
2994 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2995 		/* Create the TX descriptor ring */
2996 		tsize = roundup2((que->num_desc *
2997 		    sizeof(struct i40e_tx_desc)) +
2998 		    sizeof(u32), DBA_ALIGN);
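		/*
		** The extra u32 past the descriptors holds the
		** head-writeback word set up in ixl_initialize_vsi.
		*/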
2999 		if (i40e_allocate_dma_mem(&pf->hw,
3000 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3001 			device_printf(dev,
3002 			    "Unable to allocate TX Descriptor memory\n");
3003 			error = ENOMEM;
3004 			goto fail;
3005 		}
3006 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3007 		bzero((void *)txr->base, tsize);
3008 		/* Now allocate transmit soft structs for the ring */
3009 		if (ixl_allocate_tx_data(que)) {
3010 			device_printf(dev,
3011 			    "Critical Failure setting up TX structures\n");
3012 			error = ENOMEM;
3013 			goto fail;
3014 		}
3015 		/* Allocate a buf ring */
3016 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3017 		    M_WAITOK, &txr->mtx);
3018 		if (txr->br == NULL) {
3019 			device_printf(dev,
3020 			    "Critical Failure setting up TX buf ring\n");
3021 			error = ENOMEM;
3022 			goto fail;
3023 		}
3024 
3025 		/*
3026 		 * Next the RX queues...
3027 		 */
3028 		rsize = roundup2(que->num_desc *
3029 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3030 		rxr = &que->rxr;
3031 		rxr->que = que;
3032 		rxr->tail = I40E_QRX_TAIL(que->me);
3033 
3034 		/* Initialize the RX side lock */
3035 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3036 		    device_get_nameunit(dev), que->me);
3037 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3038 
3039 		if (i40e_allocate_dma_mem(&pf->hw,
3040 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3041 			device_printf(dev,
3042 			    "Unable to allocate RX Descriptor memory\n");
3043 			error = ENOMEM;
3044 			goto fail;
3045 		}
3046 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3047 		bzero((void *)rxr->base, rsize);
3048 
3049 		/* Allocate receive soft structs for the ring */
3050 		if (ixl_allocate_rx_data(que)) {
3051 			device_printf(dev,
3052 			    "Critical Failure setting up receive structs\n");
3053 			error = ENOMEM;
3054 			goto fail;
3055 		}
3056 	}
3057 
3058 	return (0);
3059 
3060 fail:
3061 	for (int i = 0; i < vsi->num_queues; i++) {
3062 		que = &vsi->queues[i];
3063 		rxr = &que->rxr;
3064 		txr = &que->txr;
3065 		if (rxr->base)
3066 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3067 		if (txr->base)
3068 			i40e_free_dma_mem(&pf->hw, &txr->dma);
3069 	}
3070 
3071 early:
3072 	return (error);
3073 }
3074 
3075 /*
3076 ** Provide an update to the queue RX
3077 ** interrupt moderation value.
3078 */
3079 static void
3080 ixl_set_queue_rx_itr(struct ixl_queue *que)
3081 {
3082 	struct ixl_vsi	*vsi = que->vsi;
3083 	struct i40e_hw	*hw = vsi->hw;
3084 	struct rx_ring	*rxr = &que->rxr;
3085 	u16		rx_itr;
3086 	u16		rx_latency = 0;
3087 	int		rx_bytes;
3088 
3089 
3090 	/* Idle, do nothing */
3091 	if (rxr->bytes == 0)
3092 		return;
3093 
3094 	if (ixl_dynamic_rx_itr) {
3095 		rx_bytes = rxr->bytes/rxr->itr;
3096 		rx_itr = rxr->itr;
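		/*
		** rx_bytes is the byte count scaled by the current
		** interval, approximating load per interrupt: heavy
		** load moves toward BULK latency (fewer interrupts),
		** light load back toward LOW latency.
		*/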
3097 
3098 		/* Adjust latency range */
3099 		switch (rxr->latency) {
3100 		case IXL_LOW_LATENCY:
3101 			if (rx_bytes > 10) {
3102 				rx_latency = IXL_AVE_LATENCY;
3103 				rx_itr = IXL_ITR_20K;
3104 			}
3105 			break;
3106 		case IXL_AVE_LATENCY:
3107 			if (rx_bytes > 20) {
3108 				rx_latency = IXL_BULK_LATENCY;
3109 				rx_itr = IXL_ITR_8K;
3110 			} else if (rx_bytes <= 10) {
3111 				rx_latency = IXL_LOW_LATENCY;
3112 				rx_itr = IXL_ITR_100K;
3113 			}
3114 			break;
3115 		case IXL_BULK_LATENCY:
3116 			if (rx_bytes <= 20) {
3117 				rx_latency = IXL_AVE_LATENCY;
3118 				rx_itr = IXL_ITR_20K;
3119 			}
3120 			break;
3121 		}
3122 
3123 		rxr->latency = rx_latency;
3124 
3125 		if (rx_itr != rxr->itr) {
3126 			/* do an exponential smoothing */
3127 			rx_itr = (10 * rx_itr * rxr->itr) /
3128 			    ((9 * rx_itr) + rxr->itr);
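			/*
			** e.g. itr 100 with target 40 yields
			** (10*40*100)/(9*40+100) = ~86 on this pass.
			*/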
3129 			rxr->itr = rx_itr & IXL_MAX_ITR;
3130 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3131 			    que->me), rxr->itr);
3132 		}
3133 	} else { /* We may have toggled to non-dynamic */
3134 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3135 			vsi->rx_itr_setting = ixl_rx_itr;
3136 		/* Update the hardware if needed */
3137 		if (rxr->itr != vsi->rx_itr_setting) {
3138 			rxr->itr = vsi->rx_itr_setting;
3139 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3140 			    que->me), rxr->itr);
3141 		}
3142 	}
3143 	rxr->bytes = 0;
3144 	rxr->packets = 0;
3145 	return;
3146 }
3147 
3148 
3149 /*
3150 ** Provide an update to the queue TX
3151 ** interrupt moderation value.
3152 */
3153 static void
3154 ixl_set_queue_tx_itr(struct ixl_queue *que)
3155 {
3156 	struct ixl_vsi	*vsi = que->vsi;
3157 	struct i40e_hw	*hw = vsi->hw;
3158 	struct tx_ring	*txr = &que->txr;
3159 	u16		tx_itr;
3160 	u16		tx_latency = 0;
3161 	int		tx_bytes;
3162 
3163 
3164 	/* Idle, do nothing */
3165 	if (txr->bytes == 0)
3166 		return;
3167 
3168 	if (ixl_dynamic_tx_itr) {
3169 		tx_bytes = txr->bytes/txr->itr;
3170 		tx_itr = txr->itr;
3171 
3172 		switch (txr->latency) {
3173 		case IXL_LOW_LATENCY:
3174 			if (tx_bytes > 10) {
3175 				tx_latency = IXL_AVE_LATENCY;
3176 				tx_itr = IXL_ITR_20K;
3177 			}
3178 			break;
3179 		case IXL_AVE_LATENCY:
3180 			if (tx_bytes > 20) {
3181 				tx_latency = IXL_BULK_LATENCY;
3182 				tx_itr = IXL_ITR_8K;
3183 			} else if (tx_bytes <= 10) {
3184 				tx_latency = IXL_LOW_LATENCY;
3185 				tx_itr = IXL_ITR_100K;
3186 			}
3187 			break;
3188 		case IXL_BULK_LATENCY:
3189 			if (tx_bytes <= 20) {
3190 				tx_latency = IXL_AVE_LATENCY;
3191 				tx_itr = IXL_ITR_20K;
3192 			}
3193 			break;
3194 		}
3195 
3196 		txr->latency = tx_latency;
3197 
3198 		if (tx_itr != txr->itr) {
3199 			/* do an exponential smoothing */
3200 			tx_itr = (10 * tx_itr * txr->itr) /
3201 			    ((9 * tx_itr) + txr->itr);
3202 			txr->itr = tx_itr & IXL_MAX_ITR;
3203 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3204 			    que->me), txr->itr);
3205 		}
3206 
3207 	} else { /* We may have toggled to non-dynamic */
3208 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3209 			vsi->tx_itr_setting = ixl_tx_itr;
3210 		/* Update the hardware if needed */
3211 		if (txr->itr != vsi->tx_itr_setting) {
3212 			txr->itr = vsi->tx_itr_setting;
3213 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3214 			    que->me), txr->itr);
3215 		}
3216 	}
3217 	txr->bytes = 0;
3218 	txr->packets = 0;
3219 	return;
3220 }
3221 
3222 #define QUEUE_NAME_LEN 32
3223 
3224 static void
3225 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3226     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3227 {
3228 	struct sysctl_oid *tree;
3229 	struct sysctl_oid_list *child;
3230 	struct sysctl_oid_list *vsi_list;
3231 
3232 	tree = device_get_sysctl_tree(pf->dev);
3233 	child = SYSCTL_CHILDREN(tree);
3234 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3235 				   CTLFLAG_RD, NULL, "VSI Number");
3236 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3237 
3238 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3239 }
3240 
3241 static void
3242 ixl_add_hw_stats(struct ixl_pf *pf)
3243 {
3244 	device_t dev = pf->dev;
3245 	struct ixl_vsi *vsi = &pf->vsi;
3246 	struct ixl_queue *queues = vsi->queues;
3247 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3248 
3249 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3250 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3251 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3252 	struct sysctl_oid_list *vsi_list;
3253 
3254 	struct sysctl_oid *queue_node;
3255 	struct sysctl_oid_list *queue_list;
3256 
3257 	struct tx_ring *txr;
3258 	struct rx_ring *rxr;
3259 	char queue_namebuf[QUEUE_NAME_LEN];
3260 
3261 	/* Driver statistics */
3262 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3263 			CTLFLAG_RD, &pf->watchdog_events,
3264 			"Watchdog timeouts");
3265 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3266 			CTLFLAG_RD, &pf->admin_irq,
3267 			"Admin Queue IRQ Handled");
3268 
3269 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3270 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
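	/*
	** The per-queue nodes below land under the device sysctl
	** tree, e.g. (assuming unit 0) dev.ixl.0.pf.que0.tx_packets.
	*/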
3271 
3272 	/* Queue statistics */
3273 	for (int q = 0; q < vsi->num_queues; q++) {
3274 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3275 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3276 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3277 		queue_list = SYSCTL_CHILDREN(queue_node);
3278 
3279 		txr = &(queues[q].txr);
3280 		rxr = &(queues[q].rxr);
3281 
3282 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3283 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3284 				"m_defrag() failed");
3285 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3286 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3287 				"Driver dropped packets");
3288 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3289 				CTLFLAG_RD, &(queues[q].irqs),
3290 				"irqs on this queue");
3291 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3292 				CTLFLAG_RD, &(queues[q].tso),
3293 				"TSO");
3294 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3295 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3296 				"Driver tx dma failure in xmit");
3297 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3298 				CTLFLAG_RD, &(txr->no_desc),
3299 				"Queue No Descriptor Available");
3300 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3301 				CTLFLAG_RD, &(txr->total_packets),
3302 				"Queue Packets Transmitted");
3303 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3304 				CTLFLAG_RD, &(txr->tx_bytes),
3305 				"Queue Bytes Transmitted");
3306 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3307 				CTLFLAG_RD, &(rxr->rx_packets),
3308 				"Queue Packets Received");
3309 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3310 				CTLFLAG_RD, &(rxr->rx_bytes),
3311 				"Queue Bytes Received");
3312 	}
3313 
3314 	/* MAC stats */
3315 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3316 }
3317 
3318 static void
3319 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3320 	struct sysctl_oid_list *child,
3321 	struct i40e_eth_stats *eth_stats)
3322 {
3323 	struct ixl_sysctl_info ctls[] =
3324 	{
3325 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3326 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3327 			"Unicast Packets Received"},
3328 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3329 			"Multicast Packets Received"},
3330 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3331 			"Broadcast Packets Received"},
3332 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3333 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3334 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3335 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3336 			"Multicast Packets Transmitted"},
3337 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3338 			"Broadcast Packets Transmitted"},
3339 		/* end */
3340 		{0,0,0}
3341 	};
3342 
3343 	struct ixl_sysctl_info *entry = ctls;
3344 	while (entry->stat != 0)
3345 	{
3346 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3347 				CTLFLAG_RD, entry->stat,
3348 				entry->description);
3349 		entry++;
3350 	}
3351 }
3352 
3353 static void
3354 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3355 	struct sysctl_oid_list *child,
3356 	struct i40e_hw_port_stats *stats)
3357 {
3358 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3359 				    CTLFLAG_RD, NULL, "Mac Statistics");
3360 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3361 
3362 	struct i40e_eth_stats *eth_stats = &stats->eth;
3363 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3364 
3365 	struct ixl_sysctl_info ctls[] =
3366 	{
3367 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3368 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3369 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3370 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3371 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3372 		/* Packet Reception Stats */
3373 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3374 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3375 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3376 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3377 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3378 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3379 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3380 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3381 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3382 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3383 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3384 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3385 		/* Packet Transmission Stats */
3386 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3387 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3388 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3389 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3390 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3391 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3392 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3393 		/* Flow control */
3394 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3395 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3396 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3397 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3398 		/* End */
3399 		{0,0,0}
3400 	};
3401 
3402 	struct ixl_sysctl_info *entry = ctls;
3403 	while (entry->stat != 0) {
3405 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3406 				CTLFLAG_RD, entry->stat,
3407 				entry->description);
3408 		entry++;
3409 	}
3410 }
3411 
3412 
3413 /*
3414 ** ixl_config_rss - set up RSS
3415 **  - note this is done only for the single (PF) vsi
3416 */
3417 static void
ixl_config_rss(struct ixl_vsi *vsi)
3418 {
3419 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3420 	struct i40e_hw	*hw = vsi->hw;
3421 	u32		lut = 0;
3422 	u64		set_hena = 0, hena;
3423 	int		i, j, que_id;
3424 #ifdef RSS
3425 	u32		rss_hash_config;
3426 	u32		rss_seed[IXL_KEYSZ];
3427 #else
3428 	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
3429 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3430 			    0x35897377, 0x328b25e1, 0x4fa98922,
3431 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3432 #endif
3433 
3434 #ifdef RSS
3435 	/* Fetch the configured RSS key */
3436 	rss_getkey((uint8_t *)&rss_seed);
3437 #endif
3438 
3439 	/* Fill out hash function seed */
3440 	for (i = 0; i < IXL_KEYSZ; i++)
3441 		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3442 
3443 	/* Enable PCTYPES for RSS: */
3444 #ifdef RSS
3445 	rss_hash_config = rss_gethashconfig();
3446 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3447 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3448 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3449 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3450 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3451 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3452 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3453 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3454 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3455 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3456 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3457 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3458 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3459 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3460 #else
3461 	set_hena =
3462 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3463 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3464 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3465 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3466 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3467 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3468 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3469 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3470 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3471 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3472 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3473 #endif
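	/* HENA is one 64-bit enable mask split across two 32-bit registers */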
3474 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3475 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3476 	hena |= set_hena;
3477 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3478 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3479 
3480 	/* Populate the LUT with the max number of queues, in round-robin fashion */
3481 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3482 		if (j == vsi->num_queues)
3483 			j = 0;
3484 #ifdef RSS
3485 		/*
3486 		 * Fetch the RSS bucket id for the given indirection entry.
3487 		 * Cap it at the number of configured buckets (which is
3488 		 * num_queues.)
3489 		 */
3490 		que_id = rss_get_indirection_to_bucket(i);
3491 		que_id = que_id % vsi->num_queues;
3492 #else
3493 		que_id = j;
3494 #endif
3495 		/* lut = 4-byte sliding window of 4 lut entries */
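		/*
		 * e.g. with 4 queues, after i = 0..3 lut holds 0x00010203
		 * and is written to I40E_PFQF_HLUT(0) below.
		 */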
3496 		lut = (lut << 8) | (que_id &
3497 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3498 		/* On i = 3, we have 4 entries in lut; write to the register */
3499 		if ((i & 3) == 3)
3500 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3501 	}
3502 	ixl_flush(hw);
3503 }
3504 
3505 
3506 /*
3507 ** This routine is run via a vlan config EVENT;
3508 ** it enables us to use the HW Filter table since
3509 ** we can get the vlan id. This just creates the
3510 ** entry in the soft version of the VFTA; init will
3511 ** repopulate the real table.
3512 */
3513 static void
3514 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3515 {
3516 	struct ixl_vsi	*vsi = ifp->if_softc;
3517 	struct i40e_hw	*hw = vsi->hw;
3518 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3519 
3520 	if (ifp->if_softc != arg)	/* Not our event */
3521 		return;
3522 
3523 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3524 		return;
3525 
3526 	IXL_PF_LOCK(pf);
3527 	++vsi->num_vlans;
3528 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3529 	IXL_PF_UNLOCK(pf);
3530 }
3531 
3532 /*
3533 ** This routine is run via a vlan
3534 ** unconfig EVENT; it removes our entry
3535 ** from the soft vfta.
3536 */
3537 static void
3538 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3539 {
3540 	struct ixl_vsi	*vsi = ifp->if_softc;
3541 	struct i40e_hw	*hw = vsi->hw;
3542 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3543 
3544 	if (ifp->if_softc != arg)
3545 		return;
3546 
3547 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3548 		return;
3549 
3550 	IXL_PF_LOCK(pf);
3551 	--vsi->num_vlans;
3552 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3553 	IXL_PF_UNLOCK(pf);
3554 }
3555 
3556 /*
3557 ** This routine updates vlan filters; called by init,
3558 ** it scans the filter table and then updates the hw
3559 ** after a soft reset.
3560 */
3561 static void
3562 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3563 {
3564 	struct ixl_mac_filter	*f;
3565 	int			cnt = 0, flags;
3566 
3567 	if (vsi->num_vlans == 0)
3568 		return;
3569 	/*
3570 	** Scan the filter list for vlan entries,
3571 	** mark them for addition and then call
3572 	** for the AQ update.
3573 	*/
3574 	SLIST_FOREACH(f, &vsi->ftl, next) {
3575 		if (f->flags & IXL_FILTER_VLAN) {
3576 			f->flags |=
3577 			    (IXL_FILTER_ADD |
3578 			    IXL_FILTER_USED);
3579 			cnt++;
3580 		}
3581 	}
3582 	if (cnt == 0) {
3583 		printf("setup vlan: no filters found!\n");
3584 		return;
3585 	}
3586 	flags = IXL_FILTER_VLAN;
3587 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3588 	ixl_add_hw_filters(vsi, flags, cnt);
3589 	return;
3590 }
3591 
3592 /*
3593 ** Initialize filter list and add filters that the hardware
3594 ** needs to know about.
3595 */
3596 static void
3597 ixl_init_filters(struct ixl_vsi *vsi)
3598 {
3599 	/* Add broadcast address */
3600 	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3601 }
3602 
3603 /*
3604 ** This routine adds multicast filters
3605 */
3606 static void
3607 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3608 {
3609 	struct ixl_mac_filter *f;
3610 
3611 	/* Does one already exist */
3612 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3613 	if (f != NULL)
3614 		return;
3615 
3616 	f = ixl_get_filter(vsi);
3617 	if (f == NULL) {
3618 		printf("WARNING: no filter available!!\n");
3619 		return;
3620 	}
3621 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3622 	f->vlan = IXL_VLAN_ANY;
3623 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3624 	    | IXL_FILTER_MC);
3625 
3626 	return;
3627 }
3628 
3629 static void
3630 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3631 {
3632 
3633 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3634 }
3635 
3636 /*
3637 ** This routine adds macvlan filters
3638 */
3639 static void
3640 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3641 {
3642 	struct ixl_mac_filter	*f, *tmp;
3643 	struct ixl_pf		*pf;
3644 	device_t		dev;
3645 
3646 	DEBUGOUT("ixl_add_filter: begin");
3647 
3648 	pf = vsi->back;
3649 	dev = pf->dev;
3650 
3651 	/* Does one already exist */
3652 	f = ixl_find_filter(vsi, macaddr, vlan);
3653 	if (f != NULL)
3654 		return;
3655 	/*
3656 	** If this is the first vlan being registered, we
3657 	** need to remove the ANY filter that indicates we are
3658 	** not in a vlan, and replace it with a 0 filter.
3659 	*/
3660 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3661 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3662 		if (tmp != NULL) {
3663 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3664 			ixl_add_filter(vsi, macaddr, 0);
3665 		}
3666 	}
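	/*
	 * e.g. registering the first vlan (say vlan 5) on a MAC that
	 * only carried the {mac, VLAN_ANY} filter swaps it for {mac, 0}
	 * above; {mac, 5} itself is added below.
	 */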
3667 
3668 	f = ixl_get_filter(vsi);
3669 	if (f == NULL) {
3670 		device_printf(dev, "WARNING: no filter available!!\n");
3671 		return;
3672 	}
3673 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3674 	f->vlan = vlan;
3675 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3676 	if (f->vlan != IXL_VLAN_ANY)
3677 		f->flags |= IXL_FILTER_VLAN;
3678 	else
3679 		vsi->num_macs++;
3680 
3681 	ixl_add_hw_filters(vsi, f->flags, 1);
3682 	return;
3683 }
3684 
3685 static void
3686 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3687 {
3688 	struct ixl_mac_filter *f;
3689 
3690 	f = ixl_find_filter(vsi, macaddr, vlan);
3691 	if (f == NULL)
3692 		return;
3693 
3694 	f->flags |= IXL_FILTER_DEL;
3695 	ixl_del_hw_filters(vsi, 1);
3696 	vsi->num_macs--;
3697 
3698 	/* Check if this is the last vlan removal */
3699 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3700 		/* Switch back to a non-vlan filter */
3701 		ixl_del_filter(vsi, macaddr, 0);
3702 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3703 	}
3704 	return;
3705 }
3706 
3707 /*
3708 ** Find the filter with both matching mac addr and vlan id
3709 */
3710 static struct ixl_mac_filter *
3711 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3712 {
3713 	struct ixl_mac_filter	*f;
3714 	bool			match = FALSE;
3715 
3716 	SLIST_FOREACH(f, &vsi->ftl, next) {
3717 		if (!cmp_etheraddr(f->macaddr, macaddr))
3718 			continue;
3719 		if (f->vlan == vlan) {
3720 			match = TRUE;
3721 			break;
3722 		}
3723 	}
3724 
3725 	if (!match)
3726 		f = NULL;
3727 	return (f);
3728 }
3729 
3730 /*
3731 ** This routine takes additions to the vsi filter
3732 ** table and creates an Admin Queue call to create
3733 ** the filters in the hardware.
3734 */
3735 static void
3736 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3737 {
3738 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3739 	struct ixl_mac_filter	*f;
3740 	struct ixl_pf		*pf;
3741 	struct i40e_hw		*hw;
3742 	device_t		dev;
3743 	int			err, j = 0;
3744 
3745 	pf = vsi->back;
3746 	dev = pf->dev;
3747 	hw = &pf->hw;
3748 	IXL_PF_LOCK_ASSERT(pf);
3749 
3750 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3751 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3752 	if (a == NULL) {
3753 		device_printf(dev, "add_hw_filters failed to get memory\n");
3754 		return;
3755 	}
3756 
3757 	/*
3758 	** Scan the filter list, each time we find one
3759 	** we add it to the admin queue array and turn off
3760 	** the add bit.
3761 	*/
3762 	SLIST_FOREACH(f, &vsi->ftl, next) {
3763 		if (f->flags == flags) {
3764 			b = &a[j]; // a pox on fvl long names :)
3765 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3766 			if (f->vlan == IXL_VLAN_ANY) {
3767 				b->vlan_tag = 0;
3768 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3769 			} else {
3770 				b->vlan_tag = f->vlan;
3771 				b->flags = 0;
3772 			}
3773 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3774 			f->flags &= ~IXL_FILTER_ADD;
3775 			j++;
3776 		}
3777 		if (j == cnt)
3778 			break;
3779 	}
3780 	if (j > 0) {
3781 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3782 		if (err)
3783 			device_printf(dev, "aq_add_macvlan err %d, "
3784 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3785 		else
3786 			vsi->hw_filters_add += j;
3787 	}
3788 	free(a, M_DEVBUF);
3789 	return;
3790 }
3791 
3792 /*
3793 ** This routine takes removals in the vsi filter
3794 ** table and creates an Admin Queue call to delete
3795 ** the filters in the hardware.
3796 */
3797 static void
3798 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3799 {
3800 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3801 	struct ixl_pf		*pf;
3802 	struct i40e_hw		*hw;
3803 	device_t		dev;
3804 	struct ixl_mac_filter	*f, *f_temp;
3805 	int			err, j = 0;
3806 
3807 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3808 
3809 	pf = vsi->back;
3810 	hw = &pf->hw;
3811 	dev = pf->dev;
3812 
3813 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3814 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3815 	if (d == NULL) {
3816 		device_printf(dev, "del_hw_filters failed to get memory\n");
3817 		return;
3818 	}
3819 
3820 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3821 		if (f->flags & IXL_FILTER_DEL) {
3822 			e = &d[j]; // a pox on fvl long names :)
3823 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3824 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3825 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3826 			/* delete entry from vsi list */
3827 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3828 			free(f, M_DEVBUF);
3829 			j++;
3830 		}
3831 		if (j == cnt)
3832 			break;
3833 	}
3834 	if (j > 0) {
3835 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3836 		/*
3837 		 * NOTE: returns ENOENT every time but seems to work fine,
3838 		 * so we'll ignore that specific error.
		 * TODO: does this still occur on current firmwares?
		 */
3839 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3840 			int sc = 0;
3841 			for (int i = 0; i < j; i++)
3842 				sc += (!d[i].error_code);
3843 			vsi->hw_filters_del += sc;
3844 			device_printf(dev,
3845 			    "Failed to remove %d/%d filters, aq error %d\n",
3846 			    j - sc, j, hw->aq.asq_last_status);
3847 		} else
3848 			vsi->hw_filters_del += j;
3849 	}
3850 	free(d, M_DEVBUF);
3851 
3852 	DEBUGOUT("ixl_del_hw_filters: end\n");
3853 	return;
3854 }
3855 
3856 static int
3857 ixl_enable_rings(struct ixl_vsi *vsi)
3858 {
3859 	struct ixl_pf	*pf = vsi->back;
3860 	struct i40e_hw	*hw = &pf->hw;
3861 	int		index, error;
3862 	u32		reg;
3863 
3864 	error = 0;
3865 	for (int i = 0; i < vsi->num_queues; i++) {
3866 		index = vsi->first_queue + i;
3867 		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3868 
3869 		reg = rd32(hw, I40E_QTX_ENA(index));
3870 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3871 		    I40E_QTX_ENA_QENA_STAT_MASK;
3872 		wr32(hw, I40E_QTX_ENA(index), reg);
3873 		/* Verify the enable took */
3874 		for (int j = 0; j < 10; j++) {
3875 			reg = rd32(hw, I40E_QTX_ENA(index));
3876 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3877 				break;
3878 			i40e_msec_delay(10);
3879 		}
3880 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3881 			device_printf(pf->dev, "TX queue %d failed to enable!\n",
3882 			    index);
3883 			error = ETIMEDOUT;
3884 		}
3885 
3886 		reg = rd32(hw, I40E_QRX_ENA(index));
3887 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3888 		    I40E_QRX_ENA_QENA_STAT_MASK;
3889 		wr32(hw, I40E_QRX_ENA(index), reg);
3890 		/* Verify the enable took */
3891 		for (int j = 0; j < 10; j++) {
3892 			reg = rd32(hw, I40E_QRX_ENA(index));
3893 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3894 				break;
3895 			i40e_msec_delay(10);
3896 		}
3897 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3898 			device_printf(pf->dev, "RX queue %d failed to enable!\n",
3899 			    index);
3900 			error = ETIMEDOUT;
3901 		}
3902 	}
3903 
3904 	return (error);
3905 }
3906 
3907 static int
3908 ixl_disable_rings(struct ixl_vsi *vsi)
3909 {
3910 	struct ixl_pf	*pf = vsi->back;
3911 	struct i40e_hw	*hw = &pf->hw;
3912 	int		index, error;
3913 	u32		reg;
3914 
3915 	error = 0;
3916 	for (int i = 0; i < vsi->num_queues; i++) {
3917 		index = vsi->first_queue + i;
3918 
3919 		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3920 		i40e_usec_delay(500);
3921 
3922 		reg = rd32(hw, I40E_QTX_ENA(index));
3923 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3924 		wr32(hw, I40E_QTX_ENA(index), reg);
3925 		/* Verify the disable took */
3926 		for (int j = 0; j < 10; j++) {
3927 			reg = rd32(hw, I40E_QTX_ENA(index));
3928 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3929 				break;
3930 			i40e_msec_delay(10);
3931 		}
3932 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3933 			device_printf(pf->dev, "TX queue %d still enabled!\n",
3934 			    index);
3935 			error = ETIMEDOUT;
3936 		}
3937 
3938 		reg = rd32(hw, I40E_QRX_ENA(index));
3939 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3940 		wr32(hw, I40E_QRX_ENA(index), reg);
3941 		/* Verify the disable took */
3942 		for (int j = 0; j < 10; j++) {
3943 			reg = rd32(hw, I40E_QRX_ENA(index));
3944 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3945 				break;
3946 			i40e_msec_delay(10);
3947 		}
3948 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3949 			device_printf(pf->dev, "RX queue %d still enabled!\n",
3950 			    index);
3951 			error = ETIMEDOUT;
3952 		}
3953 	}
3954 
3955 	return (error);
3956 }
3957 
3958 /**
3959  * ixl_handle_mdd_event
3960  *
3961  * Called from the interrupt handler to identify possibly malicious VFs
3962  * (also detects MDD events triggered by the PF itself)
3963  **/
3964 static void
ixl_handle_mdd_event(struct ixl_pf *pf)
3965 {
3966 	struct i40e_hw *hw = &pf->hw;
3967 	device_t dev = pf->dev;
3968 	bool mdd_detected = false;
3969 	bool pf_mdd_detected = false;
3970 	u32 reg;
3971 
3972 	/* find what triggered the MDD event */
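	/*
	 * The global GL_MDET_TX/RX registers report which function and
	 * queue tripped malicious driver detection; the per-PF
	 * PF_MDET_TX/RX registers then tell us whether this PF itself
	 * was the offender.
	 */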
3973 	reg = rd32(hw, I40E_GL_MDET_TX);
3974 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3975 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3976 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3977 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3978 				I40E_GL_MDET_TX_EVENT_SHIFT;
3979 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3980 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3981 		device_printf(dev,
3982 			 "Malicious Driver Detection event 0x%02x"
3983 			 " on TX queue %d pf number 0x%02x\n",
3984 			 event, queue, pf_num);
3985 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3986 		mdd_detected = true;
3987 	}
3988 	reg = rd32(hw, I40E_GL_MDET_RX);
3989 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3990 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3991 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3992 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3993 				I40E_GL_MDET_RX_EVENT_SHIFT;
3994 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3995 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3996 		device_printf(dev,
3997 			 "Malicious Driver Detection event 0x%02x"
3998 			 " on RX queue %d of function 0x%02x\n",
3999 			 event, queue, func);
4000 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4001 		mdd_detected = true;
4002 	}
4003 
4004 	if (mdd_detected) {
4005 		reg = rd32(hw, I40E_PF_MDET_TX);
4006 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4007 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4008 			device_printf(dev,
4009 				 "MDD TX event is for this function 0x%08x\n",
4010 				 reg);
4011 			pf_mdd_detected = true;
4012 		}
4013 		reg = rd32(hw, I40E_PF_MDET_RX);
4014 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4015 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4016 			device_printf(dev,
4017 				 "MDD RX event is for this function 0x%08x\n",
4018 				 reg);
4019 			pf_mdd_detected = true;
4020 		}
4021 	}
4022 
4023 	/* re-enable mdd interrupt cause */
4024 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4025 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4026 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4027 	ixl_flush(hw);
4028 }
4029 
4030 static void
4031 ixl_enable_intr(struct ixl_vsi *vsi)
4032 {
4033 	struct i40e_hw		*hw = vsi->hw;
4034 	struct ixl_queue	*que = vsi->queues;
4035 
4036 	if (ixl_enable_msix) {
4037 		ixl_enable_adminq(hw);
4038 		for (int i = 0; i < vsi->num_queues; i++, que++)
4039 			ixl_enable_queue(hw, que->me);
4040 	} else
4041 		ixl_enable_legacy(hw);
4042 }
4043 
4044 static void
4045 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4046 {
4047 	struct i40e_hw		*hw = vsi->hw;
4048 	struct ixl_queue	*que = vsi->queues;
4049 
4050 	for (int i = 0; i < vsi->num_queues; i++, que++)
4051 		ixl_disable_queue(hw, que->me);
4052 }
4053 
4054 static void
4055 ixl_disable_intr(struct ixl_vsi *vsi)
4056 {
4057 	struct i40e_hw		*hw = vsi->hw;
4058 
4059 	if (ixl_enable_msix)
4060 		ixl_disable_adminq(hw);
4061 	else
4062 		ixl_disable_legacy(hw);
4063 }
4064 
4065 static void
4066 ixl_enable_adminq(struct i40e_hw *hw)
4067 {
4068 	u32		reg;
4069 
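	/*
	 * Set INTENA to enable the admin queue interrupt, CLEARPBA to
	 * clear its pending-interrupt bit, and ITR_NONE to skip
	 * interrupt throttling.
	 */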
4070 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4071 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4072 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4073 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4074 	ixl_flush(hw);
4075 	return;
4076 }
4077 
4078 static void
4079 ixl_disable_adminq(struct i40e_hw *hw)
4080 {
4081 	u32		reg;
4082 
4083 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4084 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4085 
4086 	return;
4087 }
4088 
4089 static void
4090 ixl_enable_queue(struct i40e_hw *hw, int id)
4091 {
4092 	u32		reg;
4093 
4094 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4095 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4096 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4097 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4098 }
4099 
4100 static void
4101 ixl_disable_queue(struct i40e_hw *hw, int id)
4102 {
4103 	u32		reg;
4104 
4105 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4106 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4107 
4108 	return;
4109 }
4110 
4111 static void
4112 ixl_enable_legacy(struct i40e_hw *hw)
4113 {
4114 	u32		reg;
4115 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4116 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4117 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4118 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4119 }
4120 
4121 static void
4122 ixl_disable_legacy(struct i40e_hw *hw)
4123 {
4124 	u32		reg;
4125 
4126 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4127 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4128 
4129 	return;
4130 }
4131 
4132 static void
4133 ixl_update_stats_counters(struct ixl_pf *pf)
4134 {
4135 	struct i40e_hw	*hw = &pf->hw;
4136 	struct ixl_vsi	*vsi = &pf->vsi;
4137 	struct ixl_vf	*vf;
4138 
4139 	struct i40e_hw_port_stats *nsd = &pf->stats;
4140 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4141 
4142 	/* Update hw stats */
4143 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4144 			   pf->stat_offsets_loaded,
4145 			   &osd->crc_errors, &nsd->crc_errors);
4146 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4147 			   pf->stat_offsets_loaded,
4148 			   &osd->illegal_bytes, &nsd->illegal_bytes);
4149 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4150 			   I40E_GLPRT_GORCL(hw->port),
4151 			   pf->stat_offsets_loaded,
4152 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4153 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4154 			   I40E_GLPRT_GOTCL(hw->port),
4155 			   pf->stat_offsets_loaded,
4156 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4157 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4158 			   pf->stat_offsets_loaded,
4159 			   &osd->eth.rx_discards,
4160 			   &nsd->eth.rx_discards);
4161 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4162 			   I40E_GLPRT_UPRCL(hw->port),
4163 			   pf->stat_offsets_loaded,
4164 			   &osd->eth.rx_unicast,
4165 			   &nsd->eth.rx_unicast);
4166 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4167 			   I40E_GLPRT_UPTCL(hw->port),
4168 			   pf->stat_offsets_loaded,
4169 			   &osd->eth.tx_unicast,
4170 			   &nsd->eth.tx_unicast);
4171 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4172 			   I40E_GLPRT_MPRCL(hw->port),
4173 			   pf->stat_offsets_loaded,
4174 			   &osd->eth.rx_multicast,
4175 			   &nsd->eth.rx_multicast);
4176 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4177 			   I40E_GLPRT_MPTCL(hw->port),
4178 			   pf->stat_offsets_loaded,
4179 			   &osd->eth.tx_multicast,
4180 			   &nsd->eth.tx_multicast);
4181 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4182 			   I40E_GLPRT_BPRCL(hw->port),
4183 			   pf->stat_offsets_loaded,
4184 			   &osd->eth.rx_broadcast,
4185 			   &nsd->eth.rx_broadcast);
4186 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4187 			   I40E_GLPRT_BPTCL(hw->port),
4188 			   pf->stat_offsets_loaded,
4189 			   &osd->eth.tx_broadcast,
4190 			   &nsd->eth.tx_broadcast);
4191 
4192 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4193 			   pf->stat_offsets_loaded,
4194 			   &osd->tx_dropped_link_down,
4195 			   &nsd->tx_dropped_link_down);
4196 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4197 			   pf->stat_offsets_loaded,
4198 			   &osd->mac_local_faults,
4199 			   &nsd->mac_local_faults);
4200 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4201 			   pf->stat_offsets_loaded,
4202 			   &osd->mac_remote_faults,
4203 			   &nsd->mac_remote_faults);
4204 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4205 			   pf->stat_offsets_loaded,
4206 			   &osd->rx_length_errors,
4207 			   &nsd->rx_length_errors);
4208 
4209 	/* Flow control (LFC) stats */
4210 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4211 			   pf->stat_offsets_loaded,
4212 			   &osd->link_xon_rx, &nsd->link_xon_rx);
4213 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4214 			   pf->stat_offsets_loaded,
4215 			   &osd->link_xon_tx, &nsd->link_xon_tx);
4216 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4217 			   pf->stat_offsets_loaded,
4218 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4219 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4220 			   pf->stat_offsets_loaded,
4221 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4222 
4223 	/* Packet size stats rx */
4224 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4225 			   I40E_GLPRT_PRC64L(hw->port),
4226 			   pf->stat_offsets_loaded,
4227 			   &osd->rx_size_64, &nsd->rx_size_64);
4228 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4229 			   I40E_GLPRT_PRC127L(hw->port),
4230 			   pf->stat_offsets_loaded,
4231 			   &osd->rx_size_127, &nsd->rx_size_127);
4232 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4233 			   I40E_GLPRT_PRC255L(hw->port),
4234 			   pf->stat_offsets_loaded,
4235 			   &osd->rx_size_255, &nsd->rx_size_255);
4236 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4237 			   I40E_GLPRT_PRC511L(hw->port),
4238 			   pf->stat_offsets_loaded,
4239 			   &osd->rx_size_511, &nsd->rx_size_511);
4240 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4241 			   I40E_GLPRT_PRC1023L(hw->port),
4242 			   pf->stat_offsets_loaded,
4243 			   &osd->rx_size_1023, &nsd->rx_size_1023);
4244 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4245 			   I40E_GLPRT_PRC1522L(hw->port),
4246 			   pf->stat_offsets_loaded,
4247 			   &osd->rx_size_1522, &nsd->rx_size_1522);
4248 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4249 			   I40E_GLPRT_PRC9522L(hw->port),
4250 			   pf->stat_offsets_loaded,
4251 			   &osd->rx_size_big, &nsd->rx_size_big);
4252 
4253 	/* Packet size stats tx */
4254 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4255 			   I40E_GLPRT_PTC64L(hw->port),
4256 			   pf->stat_offsets_loaded,
4257 			   &osd->tx_size_64, &nsd->tx_size_64);
4258 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4259 			   I40E_GLPRT_PTC127L(hw->port),
4260 			   pf->stat_offsets_loaded,
4261 			   &osd->tx_size_127, &nsd->tx_size_127);
4262 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4263 			   I40E_GLPRT_PTC255L(hw->port),
4264 			   pf->stat_offsets_loaded,
4265 			   &osd->tx_size_255, &nsd->tx_size_255);
4266 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4267 			   I40E_GLPRT_PTC511L(hw->port),
4268 			   pf->stat_offsets_loaded,
4269 			   &osd->tx_size_511, &nsd->tx_size_511);
4270 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4271 			   I40E_GLPRT_PTC1023L(hw->port),
4272 			   pf->stat_offsets_loaded,
4273 			   &osd->tx_size_1023, &nsd->tx_size_1023);
4274 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4275 			   I40E_GLPRT_PTC1522L(hw->port),
4276 			   pf->stat_offsets_loaded,
4277 			   &osd->tx_size_1522, &nsd->tx_size_1522);
4278 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4279 			   I40E_GLPRT_PTC9522L(hw->port),
4280 			   pf->stat_offsets_loaded,
4281 			   &osd->tx_size_big, &nsd->tx_size_big);
4282 
4283 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4284 			   pf->stat_offsets_loaded,
4285 			   &osd->rx_undersize, &nsd->rx_undersize);
4286 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4287 			   pf->stat_offsets_loaded,
4288 			   &osd->rx_fragments, &nsd->rx_fragments);
4289 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4290 			   pf->stat_offsets_loaded,
4291 			   &osd->rx_oversize, &nsd->rx_oversize);
4292 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4293 			   pf->stat_offsets_loaded,
4294 			   &osd->rx_jabber, &nsd->rx_jabber);
4295 	pf->stat_offsets_loaded = true;
4296 	/* End hw stats */
4297 
4298 	/* Update vsi stats */
4299 	ixl_update_vsi_stats(vsi);
4300 
4301 	for (int i = 0; i < pf->num_vfs; i++) {
4302 		vf = &pf->vfs[i];
4303 		if (vf->vf_flags & VF_FLAG_ENABLED)
4304 			ixl_update_eth_stats(&vf->vsi);
4305 	}
4306 }
4307 
4308 /*
4309 ** Tasklet handler for MSIX Adminq interrupts
4310 **  - done outside the interrupt context since it might sleep
4311 */
4312 static void
4313 ixl_do_adminq(void *context, int pending)
4314 {
4315 	struct ixl_pf			*pf = context;
4316 	struct i40e_hw			*hw = &pf->hw;
4317 	struct ixl_vsi			*vsi = &pf->vsi;
4318 	struct i40e_arq_event_info	event;
4319 	i40e_status			ret;
4320 	u32				reg, loop = 0;
4321 	u16				opcode, result;
4322 
4323 	event.buf_len = IXL_AQ_BUF_SZ;
4324 	event.msg_buf = malloc(event.buf_len,
4325 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4326 	if (!event.msg_buf) {
4327 		device_printf(pf->dev, "Unable to allocate adminq memory\n");
4328 		return;
4329 	}
4330 
4331 	IXL_PF_LOCK(pf);
4332 	/* clean and process any events */
4333 	do {
4334 		ret = i40e_clean_arq_element(hw, &event, &result);
4335 		if (ret)
4336 			break;
4337 		opcode = LE16_TO_CPU(event.desc.opcode);
4338 		switch (opcode) {
4339 		case i40e_aqc_opc_get_link_status:
4340 			ixl_link_event(pf, &event);
4341 			ixl_update_link_status(pf);
4342 			break;
4343 		case i40e_aqc_opc_send_msg_to_pf:
4344 #ifdef PCI_IOV
4345 			ixl_handle_vf_msg(pf, &event);
4346 #endif
4347 			break;
4348 		case i40e_aqc_opc_event_lan_overflow:
4349 			break;
4350 		default:
4351 #ifdef IXL_DEBUG
4352 			printf("AdminQ unknown event %x\n", opcode);
4353 #endif
4354 			break;
4355 		}
4356 
4357 	} while (result && (loop++ < IXL_ADM_LIMIT));
4358 
4359 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4360 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4361 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4362 	free(event.msg_buf, M_DEVBUF);
4363 
4364 	/*
4365 	 * If there are still messages to process, reschedule ourselves.
4366 	 * Otherwise, re-enable our interrupt and go to sleep.
4367 	 */
4368 	if (result > 0)
4369 		taskqueue_enqueue(pf->tq, &pf->adminq);
4370 	else
4371 		ixl_enable_intr(vsi);
4372 
4373 	IXL_PF_UNLOCK(pf);
4374 }
4375 
4376 static int
4377 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4378 {
4379 	struct ixl_pf	*pf;
4380 	int		error, input = 0;
4381 
4382 	error = sysctl_handle_int(oidp, &input, 0, req);
4383 
4384 	if (error || !req->newptr)
4385 		return (error);
4386 
4387 	if (input == 1) {
4388 		pf = (struct ixl_pf *)arg1;
4389 		ixl_print_debug_info(pf);
4390 	}
4391 
4392 	return (error);
4393 }
4394 
4395 static void
4396 ixl_print_debug_info(struct ixl_pf *pf)
4397 {
4398 	struct i40e_hw		*hw = &pf->hw;
4399 	struct ixl_vsi		*vsi = &pf->vsi;
4400 	struct ixl_queue	*que = vsi->queues;
4401 	struct rx_ring		*rxr = &que->rxr;
4402 	struct tx_ring		*txr = &que->txr;
4403 	u32			reg;
4404 
4405 
4406 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4407 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4408 	printf("RX next check = %x\n", rxr->next_check);
4409 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4410 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4411 	printf("TX desc avail = %x\n", txr->avail);
4412 
4413 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4414 	printf("RX Bytes = %x\n", reg);
4415 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4416 	printf("Port RX Bytes = %x\n", reg);
4417 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4418 	printf("RX discard = %x\n", reg);
4419 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4420 	printf("Port RX discard = %x\n", reg);
4421 
4422 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4423 	printf("TX errors = %x\n", reg);
4424 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4425 	printf("TX Bytes = %x\n", reg);
4426 
4427 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4428 	printf("RX undersize = %x\n", reg);
4429 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4430 	printf("RX fragments = %x\n", reg);
4431 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4432 	printf("RX oversize = %x\n", reg);
4433 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4434 	printf("RX length error = %x\n", reg);
4435 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4436 	printf("mac remote fault = %x\n", reg);
4437 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4438 	printf("mac local fault = %x\n", reg);
4439 }
4440 
4441 /**
4442  * Update VSI-specific ethernet statistics counters.
4443  **/
4444 void
ixl_update_eth_stats(struct ixl_vsi *vsi)
4445 {
4446 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4447 	struct i40e_hw *hw = &pf->hw;
4448 	struct i40e_eth_stats *es;
4449 	struct i40e_eth_stats *oes;
4450 	struct i40e_hw_port_stats *nsd;
4451 	u16 stat_idx = vsi->info.stat_counter_idx;
4452 
4453 	es = &vsi->eth_stats;
4454 	oes = &vsi->eth_stats_offsets;
4455 	nsd = &pf->stats;
4456 
4457 	/* Gather up the stats that the hw collects */
4458 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4459 			   vsi->stat_offsets_loaded,
4460 			   &oes->tx_errors, &es->tx_errors);
4461 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4462 			   vsi->stat_offsets_loaded,
4463 			   &oes->rx_discards, &es->rx_discards);
4464 
4465 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4466 			   I40E_GLV_GORCL(stat_idx),
4467 			   vsi->stat_offsets_loaded,
4468 			   &oes->rx_bytes, &es->rx_bytes);
4469 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4470 			   I40E_GLV_UPRCL(stat_idx),
4471 			   vsi->stat_offsets_loaded,
4472 			   &oes->rx_unicast, &es->rx_unicast);
4473 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4474 			   I40E_GLV_MPRCL(stat_idx),
4475 			   vsi->stat_offsets_loaded,
4476 			   &oes->rx_multicast, &es->rx_multicast);
4477 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4478 			   I40E_GLV_BPRCL(stat_idx),
4479 			   vsi->stat_offsets_loaded,
4480 			   &oes->rx_broadcast, &es->rx_broadcast);
4481 
4482 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4483 			   I40E_GLV_GOTCL(stat_idx),
4484 			   vsi->stat_offsets_loaded,
4485 			   &oes->tx_bytes, &es->tx_bytes);
4486 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4487 			   I40E_GLV_UPTCL(stat_idx),
4488 			   vsi->stat_offsets_loaded,
4489 			   &oes->tx_unicast, &es->tx_unicast);
4490 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4491 			   I40E_GLV_MPTCL(stat_idx),
4492 			   vsi->stat_offsets_loaded,
4493 			   &oes->tx_multicast, &es->tx_multicast);
4494 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4495 			   I40E_GLV_BPTCL(stat_idx),
4496 			   vsi->stat_offsets_loaded,
4497 			   &oes->tx_broadcast, &es->tx_broadcast);
4498 	vsi->stat_offsets_loaded = true;
4499 }
4500 
4501 static void
4502 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4503 {
4504 	struct ixl_pf		*pf;
4505 	struct ifnet		*ifp;
4506 	struct i40e_eth_stats	*es;
4507 	u64			tx_discards;
4508 
4509 	struct i40e_hw_port_stats *nsd;
4510 
4511 	pf = vsi->back;
4512 	ifp = vsi->ifp;
4513 	es = &vsi->eth_stats;
4514 	nsd = &pf->stats;
4515 
4516 	ixl_update_eth_stats(vsi);
4517 
4518 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4519 	for (int i = 0; i < vsi->num_queues; i++)
4520 		tx_discards += vsi->queues[i].txr.br->br_drops;
4521 
4522 	/* Update ifnet stats */
4523 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4524 	                   es->rx_multicast +
4525 			   es->rx_broadcast);
4526 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4527 	                   es->tx_multicast +
4528 			   es->tx_broadcast);
4529 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4530 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4531 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4532 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4533 
4534 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4535 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4536 	    nsd->rx_jabber);
4537 	IXL_SET_OERRORS(vsi, es->tx_errors);
4538 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4539 	IXL_SET_OQDROPS(vsi, tx_discards);
4540 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4541 	IXL_SET_COLLISIONS(vsi, 0);
4542 }
4543 
4544 /**
4545  * Reset all of the stats for the given pf
4546  **/
4547 void
ixl_pf_reset_stats(struct ixl_pf *pf)
4548 {
4549 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4550 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4551 	pf->stat_offsets_loaded = false;
4552 }
4553 
4554 /**
4555  * Resets all stats of the given vsi
4556  **/
4557 void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4558 {
4559 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4560 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4561 	vsi->stat_offsets_loaded = false;
4562 }
4563 
4564 /**
4565  * Read and update a 48 bit stat from the hw
4566  *
4567  * Since the device stats are not reset at PFReset, they likely will not
4568  * be zeroed when the driver starts.  We'll save the first values read
4569  * and use them as offsets to be subtracted from the raw values in order
4570  * to report stats that count from zero.
4571  **/
4572 static void
4573 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4574 	bool offset_loaded, u64 *offset, u64 *stat)
4575 {
4576 	u64 new_data;
4577 
4578 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4579 	new_data = rd64(hw, loreg);
4580 #else
4581 	/*
4582 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4583 	 * 10 don't support 8 byte bus reads/writes.
4584 	 */
4585 	new_data = rd32(hw, loreg);
4586 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4587 #endif
4588 
4589 	if (!offset_loaded)
4590 		*offset = new_data;
4591 	if (new_data >= *offset)
4592 		*stat = new_data - *offset;
4593 	else
4594 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4595 	*stat &= 0xFFFFFFFFFFFFULL;
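	/*
	 * e.g. offset 0xFFFFFFFFFFFE with new_data 0x1 yields
	 * (0x1 + 2^48) - 0xFFFFFFFFFFFE = 3 counts since the wrap.
	 */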
4596 }
4597 
4598 /**
4599  * Read and update a 32 bit stat from the hw
4600  **/
4601 static void
4602 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4603 	bool offset_loaded, u64 *offset, u64 *stat)
4604 {
4605 	u32 new_data;
4606 
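	/* Same offset-and-wrap scheme as the 48-bit variant, modulo 2^32. */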
4607 	new_data = rd32(hw, reg);
4608 	if (!offset_loaded)
4609 		*offset = new_data;
4610 	if (new_data >= *offset)
4611 		*stat = (u32)(new_data - *offset);
4612 	else
4613 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4614 }
4615 
4616 /*
4617 ** Set flow control using sysctl:
4618 ** 	0 - off
4619 **	1 - rx pause
4620 **	2 - tx pause
4621 **	3 - full
4622 */
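/*
** For example (a usage sketch, assuming the handler is registered
** as an "fc" sysctl under the device's tree elsewhere in the driver):
**	# sysctl dev.ixl.0.fc=3		(request full rx/tx pause)
*/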
4623 static int
4624 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4625 {
4626 	/*
4627 	 * TODO: ensure flow control is disabled if
4628 	 * priority flow control is enabled
4629 	 *
4630 	 * TODO: ensure tx CRC by hardware should be enabled
4631 	 * if tx flow control is enabled.
4632 	 */
4633 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4634 	struct i40e_hw *hw = &pf->hw;
4635 	device_t dev = pf->dev;
4636 	int error = 0;
4637 	enum i40e_status_code aq_error = 0;
4638 	u8 fc_aq_err = 0;
4639 
4640 	/* Get request */
4641 	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4642 	if ((error) || (req->newptr == NULL))
4643 		return (error);
4644 	if (pf->fc < 0 || pf->fc > 3) {
4645 		device_printf(dev,
4646 		    "Invalid fc mode; valid modes are 0 through 3\n");
4647 		return (EINVAL);
4648 	}
4649 
4650 	/*
4651 	** Changing flow control mode currently does not work on
4652 	** 40GBASE-CR4 PHYs
4653 	*/
4654 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4655 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4656 		device_printf(dev, "Changing flow control mode unsupported"
4657 		    " on 40GBase-CR4 media.\n");
4658 		return (ENODEV);
4659 	}
4660 
4661 	/* Set fc ability for port */
4662 	hw->fc.requested_mode = pf->fc;
4663 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4664 	if (aq_error) {
4665 		device_printf(dev,
4666 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4667 		    __func__, aq_error, fc_aq_err);
4668 		return (EAGAIN);
4669 	}
4670 
4671 	return (0);
4672 }
4673 
4674 static int
4675 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4676 {
4677 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4678 	struct i40e_hw *hw = &pf->hw;
4679 	int error = 0, index = 0;
4680 
4681 	char *speeds[] = {
4682 		"Unknown",
4683 		"100M",
4684 		"1G",
4685 		"10G",
4686 		"40G",
4687 		"20G"
4688 	};
4689 
4690 	ixl_update_link_status(pf);
4691 
4692 	switch (hw->phy.link_info.link_speed) {
4693 	case I40E_LINK_SPEED_100MB:
4694 		index = 1;
4695 		break;
4696 	case I40E_LINK_SPEED_1GB:
4697 		index = 2;
4698 		break;
4699 	case I40E_LINK_SPEED_10GB:
4700 		index = 3;
4701 		break;
4702 	case I40E_LINK_SPEED_40GB:
4703 		index = 4;
4704 		break;
4705 	case I40E_LINK_SPEED_20GB:
4706 		index = 5;
4707 		break;
4708 	case I40E_LINK_SPEED_UNKNOWN:
4709 	default:
4710 		index = 0;
4711 		break;
4712 	}
4713 
4714 	error = sysctl_handle_string(oidp, speeds[index],
4715 	    strlen(speeds[index]), req);
4716 	return (error);
4717 }
4718 
4719 static int
4720 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4721 {
4722 	struct i40e_hw *hw = &pf->hw;
4723 	device_t dev = pf->dev;
4724 	struct i40e_aq_get_phy_abilities_resp abilities;
4725 	struct i40e_aq_set_phy_config config;
4726 	enum i40e_status_code aq_error = 0;
4727 
4728 	/* Get current capability information */
4729 	aq_error = i40e_aq_get_phy_capabilities(hw,
4730 	    FALSE, FALSE, &abilities, NULL);
4731 	if (aq_error) {
4732 		device_printf(dev,
4733 		    "%s: Error getting phy capabilities %d,"
4734 		    " aq error: %d\n", __func__, aq_error,
4735 		    hw->aq.asq_last_status);
4736 		return (EAGAIN);
4737 	}
4738 
4739 	/* Prepare new config */
4740 	bzero(&config, sizeof(config));
4741 	config.phy_type = abilities.phy_type;
4742 	config.abilities = abilities.abilities
4743 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4744 	config.eee_capability = abilities.eee_capability;
4745 	config.eeer = abilities.eeer_val;
4746 	config.low_power_ctrl = abilities.d3_lpan;
4747 	/* Translate into aq cmd link_speed */
4748 	if (speeds & 0x8)
4749 		config.link_speed |= I40E_LINK_SPEED_20GB;
4750 	if (speeds & 0x4)
4751 		config.link_speed |= I40E_LINK_SPEED_10GB;
4752 	if (speeds & 0x2)
4753 		config.link_speed |= I40E_LINK_SPEED_1GB;
4754 	if (speeds & 0x1)
4755 		config.link_speed |= I40E_LINK_SPEED_100MB;
4756 
4757 	/* Do aq command & restart link */
4758 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4759 	if (aq_error) {
4760 		device_printf(dev,
4761 		    "%s: Error setting new phy config %d,"
4762 		    " aq error: %d\n", __func__, aq_error,
4763 		    hw->aq.asq_last_status);
4764 		return (EAGAIN);
4765 	}
4766 
4767 	/*
4768 	** This seems a bit heavy handed, but we
4769 	** need to get a reinit on some devices
4770 	*/
4771 	IXL_PF_LOCK(pf);
4772 	ixl_stop(pf);
4773 	ixl_init_locked(pf);
4774 	IXL_PF_UNLOCK(pf);
4775 
4776 	return (0);
4777 }
4778 
4779 /*
4780 ** Control link advertise speed:
4781 **	Flags:
4782 **	0x1 - advertise 100 Mb
4783 **	0x2 - advertise 1G
4784 **	0x4 - advertise 10G
4785 **	0x8 - advertise 20G
4786 **
4787 ** Does not work on 40G devices.
4788 */
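/*
** e.g. writing 0x6 (0x2 | 0x4) advertises 1G and 10G only.
*/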
4789 static int
4790 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4791 {
4792 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4793 	struct i40e_hw *hw = &pf->hw;
4794 	device_t dev = pf->dev;
4795 	int requested_ls = 0;
4796 	int error = 0;
4797 
4798 	/*
4799 	** FW doesn't support changing advertised speed
4800 	** for 40G devices; speed is always 40G.
4801 	*/
4802 	if (i40e_is_40G_device(hw->device_id))
4803 		return (ENODEV);
4804 
4805 	/* Read in new mode */
4806 	requested_ls = pf->advertised_speed;
4807 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4808 	if ((error) || (req->newptr == NULL))
4809 		return (error);
4810 	/* Check for sane value */
4811 	if (requested_ls < 0x1 || requested_ls > 0xE) {
4812 		device_printf(dev, "Invalid advertised speed; "
4813 		    "valid modes are 0x1 through 0xE\n");
4814 		return (EINVAL);
4815 	}
4816 	/* Then check for validity based on adapter type */
4817 	switch (hw->device_id) {
4818 	case I40E_DEV_ID_10G_BASE_T:
4819 		if (requested_ls & 0x8) {
4820 			device_printf(dev,
4821 			    "20Gbs speed not supported on this device.\n");
4822 			return (EINVAL);
4823 		}
4824 		break;
4825 	case I40E_DEV_ID_20G_KR2:
4826 		if (requested_ls & 0x1) {
4827 			device_printf(dev,
4828 			    "100Mbs speed not supported on this device.\n");
4829 			return (EINVAL);
4830 		}
4831 		break;
4832 	default:
4833 		if (requested_ls & ~0x6) {
4834 			device_printf(dev,
4835 			    "Only 1/10Gbs speeds are supported on this device.\n");
4836 			return (EINVAL);
4837 		}
4838 		break;
4839 	}
4840 
4841 	/* Exit if no change */
4842 	if (pf->advertised_speed == requested_ls)
4843 		return (0);
4844 
4845 	error = ixl_set_advertised_speeds(pf, requested_ls);
4846 	if (error)
4847 		return (error);
4848 
4849 	pf->advertised_speed = requested_ls;
4850 	ixl_update_link_status(pf);
4851 	return (0);
4852 }
4853 
4854 /*
4855 ** Get the width and transaction speed of
4856 ** the bus this adapter is plugged into.
4857 */
4858 static u16
4859 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4860 {
4861         u16                     link;
4862         u32                     offset;
4863 
4864 
4865         /* Get the PCI Express Capabilities offset */
4866         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4867 
4868         /* ...and read the Link Status Register */
4869         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4870 
4871         switch (link & I40E_PCI_LINK_WIDTH) {
4872         case I40E_PCI_LINK_WIDTH_1:
4873                 hw->bus.width = i40e_bus_width_pcie_x1;
4874                 break;
4875         case I40E_PCI_LINK_WIDTH_2:
4876                 hw->bus.width = i40e_bus_width_pcie_x2;
4877                 break;
4878         case I40E_PCI_LINK_WIDTH_4:
4879                 hw->bus.width = i40e_bus_width_pcie_x4;
4880                 break;
4881         case I40E_PCI_LINK_WIDTH_8:
4882                 hw->bus.width = i40e_bus_width_pcie_x8;
4883                 break;
4884         default:
4885                 hw->bus.width = i40e_bus_width_unknown;
4886                 break;
4887         }
4888 
4889         switch (link & I40E_PCI_LINK_SPEED) {
4890         case I40E_PCI_LINK_SPEED_2500:
4891                 hw->bus.speed = i40e_bus_speed_2500;
4892                 break;
4893         case I40E_PCI_LINK_SPEED_5000:
4894                 hw->bus.speed = i40e_bus_speed_5000;
4895                 break;
4896         case I40E_PCI_LINK_SPEED_8000:
4897                 hw->bus.speed = i40e_bus_speed_8000;
4898                 break;
4899         default:
4900                 hw->bus.speed = i40e_bus_speed_unknown;
4901                 break;
4902         }
4903 
4904 
4905         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4906             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4907             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4908             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4909             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4910             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4911             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4912             ("Unknown"));
4913 
4914         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4915             (hw->bus.speed < i40e_bus_speed_8000)) {
4916                 device_printf(dev, "PCI-Express bandwidth available"
4917                     " for this device\n     may be insufficient for"
4918                     " optimal performance.\n");
4919                 device_printf(dev, "For expected performance a x8 "
4920                     "PCIE Gen3 slot is required.\n");
4921         }
4922 
4923         return (link);
4924 }
4925 
4926 static int
4927 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4928 {
4929 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4930 	struct i40e_hw	*hw = &pf->hw;
4931 	char		buf[32];
4932 
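	/*
	 * Builds a version string of the form
	 * "f<fw_maj>.<fw_min> a<api_maj>.<api_min> n<nvm_hi>.<nvm_lo> e<eetrack>".
	 */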
4933 	snprintf(buf, sizeof(buf),
4934 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4935 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4936 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4937 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4938 	    IXL_NVM_VERSION_HI_SHIFT,
4939 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4940 	    IXL_NVM_VERSION_LO_SHIFT,
4941 	    hw->nvm.eetrack);
4942 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4943 }
4944 
4945 
4946 #ifdef IXL_DEBUG_SYSCTL
4947 static int
4948 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4949 {
4950 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4951 	struct i40e_hw *hw = &pf->hw;
4952 	struct i40e_link_status link_status;
4953 	char buf[512];
4954 
4955 	enum i40e_status_code aq_error = 0;
4956 
4957 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4958 	if (aq_error) {
4959 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4960 		return (EPERM);
4961 	}
4962 
4963 	sprintf(buf, "\n"
4964 	    "PHY Type : %#04x\n"
4965 	    "Speed    : %#04x\n"
4966 	    "Link info: %#04x\n"
4967 	    "AN info  : %#04x\n"
4968 	    "Ext info : %#04x",
4969 	    link_status.phy_type, link_status.link_speed,
4970 	    link_status.link_info, link_status.an_info,
4971 	    link_status.ext_info);
4972 
4973 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4974 }
4975 
4976 static int
4977 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4978 {
4979 	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
4980 	struct i40e_hw		*hw = &pf->hw;
4981 	char			buf[512];
4982 	enum i40e_status_code	aq_error = 0;
4983 
4984 	struct i40e_aq_get_phy_abilities_resp abilities;
4985 
4986 	aq_error = i40e_aq_get_phy_capabilities(hw,
4987 	    TRUE, FALSE, &abilities, NULL);
4988 	if (aq_error) {
4989 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4990 		return (EPERM);
4991 	}
4992 
4993 	sprintf(buf, "\n"
4994 	    "PHY Type : %#010x\n"
4995 	    "Speed    : %#04x\n"
4996 	    "Abilities: %#04x\n"
4997 	    "EEE cap  : %#06x\n"
4998 	    "EEER reg : %#010x\n"
4999 	    "D3 Lpan  : %#04x",
5000 	    abilities.phy_type, abilities.link_speed,
5001 	    abilities.abilities, abilities.eee_capability,
5002 	    abilities.eeer_val, abilities.d3_lpan);
5003 
5004 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5005 }
5006 
5007 static int
5008 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5009 {
5010 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5011 	struct ixl_vsi *vsi = &pf->vsi;
5012 	struct ixl_mac_filter *f;
5013 	char *buf, *buf_i;
5014 
5015 	int error = 0;
5016 	int ftl_len = 0;
5017 	int ftl_counter = 0;
5018 	int buf_len = 0;
5019 	int entry_len = 42;
5020 
5021 	SLIST_FOREACH(f, &vsi->ftl, next) {
5022 		ftl_len++;
5023 	}
5024 
5025 	if (ftl_len < 1) {
5026 		sysctl_handle_string(oidp, "(none)", 6, req);
5027 		return (0);
5028 	}
5029 
5030 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5031 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		device_printf(pf->dev, "Could not allocate filter list buffer\n");
		return (ENOMEM);
	}
5032 
5033 	sprintf(buf_i++, "\n");
5034 	SLIST_FOREACH(f, &vsi->ftl, next) {
5035 		sprintf(buf_i,
5036 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5037 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5038 		buf_i += entry_len;
5039 		/* don't print '\n' for last entry */
5040 		if (++ftl_counter != ftl_len) {
5041 			sprintf(buf_i, "\n");
5042 			buf_i++;
5043 		}
5044 	}
5045 
5046 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5047 	if (error)
5048 		printf("sysctl error: %d\n", error);
5049 	free(buf, M_DEVBUF);
5050 	return error;
5051 }
5052 
5053 #define IXL_SW_RES_SIZE 0x14
5054 static int
5055 ixl_res_alloc_cmp(const void *a, const void *b)
5056 {
5057 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5058 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5059 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5060 
5061 	return ((int)one->resource_type - (int)two->resource_type);
5062 }
5063 
5064 static int
5065 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5066 {
5067 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5068 	struct i40e_hw *hw = &pf->hw;
5069 	device_t dev = pf->dev;
5070 	struct sbuf *buf;
5071 	int error = 0;
5072 
5073 	u8 num_entries;
5074 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5075 
5076 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5077 	if (!buf) {
5078 		device_printf(dev, "Could not allocate sbuf for output.\n");
5079 		return (ENOMEM);
5080 	}
5081 
5082 	bzero(resp, sizeof(resp));
5083 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5084 				resp,
5085 				IXL_SW_RES_SIZE,
5086 				NULL);
5087 	if (error) {
5088 		device_printf(dev,
5089 		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5090 		    __func__, error, hw->aq.asq_last_status);
5091 		sbuf_delete(buf);
5092 		return error;
5093 	}
5094 
5095 	/* Sort entries by type for display */
5096 	qsort(resp, num_entries,
5097 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5098 	    &ixl_res_alloc_cmp);
5099 
5100 	sbuf_cat(buf, "\n");
5101 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5102 	sbuf_printf(buf,
5103 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5104 	    "     | (this)     | (all) | (this) | (all)       \n");
5105 	for (int i = 0; i < num_entries; i++) {
5106 		sbuf_printf(buf,
5107 		    "%#4x | %10d   %5d   %6d   %12d",
5108 		    resp[i].resource_type,
5109 		    resp[i].guaranteed,
5110 		    resp[i].total,
5111 		    resp[i].used,
5112 		    resp[i].total_unalloced);
5113 		if (i < num_entries - 1)
5114 			sbuf_cat(buf, "\n");
5115 	}
5116 
5117 	error = sbuf_finish(buf);
5118 	if (error) {
5119 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5120 		sbuf_delete(buf);
5121 		return error;
5122 	}
5123 
5124 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5125 	if (error)
5126 		device_printf(dev, "sysctl error: %d\n", error);
5127 	sbuf_delete(buf);
5128 	return error;
5129 }
5130 
5131 /*
5132 ** Caller must init and delete sbuf; this function will clear and
5133 ** finish it for caller.
5134 */
5135 static char *
5136 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5137 {
5138 	sbuf_clear(s);
5139 
5140 	if (seid == 0 && uplink)
5141 		sbuf_cat(s, "Network");
5142 	else if (seid == 0)
5143 		sbuf_cat(s, "Host");
5144 	else if (seid == 1)
5145 		sbuf_cat(s, "EMP");
5146 	else if (seid <= 5)
5147 		sbuf_printf(s, "MAC %d", seid - 2);
5148 	else if (seid <= 15)
5149 		sbuf_cat(s, "Reserved");
5150 	else if (seid <= 31)
5151 		sbuf_printf(s, "PF %d", seid - 16);
5152 	else if (seid <= 159)
5153 		sbuf_printf(s, "VF %d", seid - 32);
5154 	else if (seid <= 287)
5155 		sbuf_cat(s, "Reserved");
5156 	else if (seid <= 511)
5157 		sbuf_cat(s, "Other"); // for other structures
5158 	else if (seid <= 895)
5159 		sbuf_printf(s, "VSI %d", seid - 512);
5160 	else if (seid <= 1023)
5161 		sbuf_printf(s, "Reserved");
5162 	else
5163 		sbuf_cat(s, "Invalid");
5164 
5165 	sbuf_finish(s);
5166 	return sbuf_data(s);
5167 }
5168 
5169 static int
5170 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5171 {
5172 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5173 	struct i40e_hw *hw = &pf->hw;
5174 	device_t dev = pf->dev;
5175 	struct sbuf *buf;
5176 	struct sbuf *nmbuf;
5177 	int error = 0;
5178 	u8 aq_buf[I40E_AQ_LARGE_BUF];
5179 
5180 	u16 next = 0;
5181 	struct i40e_aqc_get_switch_config_resp *sw_config;
5182 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5183 
5184 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5185 	if (!buf) {
5186 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5187 		return (ENOMEM);
5188 	}
5189 
5190 	error = i40e_aq_get_switch_config(hw, sw_config,
5191 	    sizeof(aq_buf), &next, NULL);
5192 	if (error) {
5193 		device_printf(dev,
5194 		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5195 		    __func__, error, hw->aq.asq_last_status);
5196 		sbuf_delete(buf);
5197 		return error;
5198 	}
5199 
5200 	nmbuf = sbuf_new_auto();
5201 	if (!nmbuf) {
5202 		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
5203 		return (ENOMEM);
5204 	}
5205 
5206 	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
5208 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5209 	/* Exclude:
5210 	** Revision -- all elements are revision 1 for now
5211 	*/
5212 	sbuf_printf(buf,
5213 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5214 	    "                |          |          | (uplink)\n");
5215 	for (int i = 0; i < sw_config->header.num_reported; i++) {
		/* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
5217 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5218 		sbuf_cat(buf, " ");
5219 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5220 		    sw_config->element[i].seid, false));
5221 		sbuf_cat(buf, " | ");
5222 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5223 		    sw_config->element[i].uplink_seid, true));
5224 		sbuf_cat(buf, "   ");
5225 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5226 		    sw_config->element[i].downlink_seid, false));
5227 		sbuf_cat(buf, "   ");
5228 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5229 		if (i < sw_config->header.num_reported - 1)
5230 			sbuf_cat(buf, "\n");
5231 	}
5232 	sbuf_delete(nmbuf);
5233 
5234 	error = sbuf_finish(buf);
5235 	if (error) {
5236 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5237 		sbuf_delete(buf);
5238 		return error;
5239 	}
5240 
5241 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5242 	if (error)
5243 		device_printf(dev, "sysctl error: %d\n", error);
5244 	sbuf_delete(buf);
5245 
5246 	return (error);
5247 }
5248 #endif /* IXL_DEBUG_SYSCTL */
5249 
5250 
5251 #ifdef PCI_IOV
5252 static int
5253 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5254 {
5255 	struct i40e_hw *hw;
5256 	struct ixl_vsi *vsi;
5257 	struct i40e_vsi_context vsi_ctx;
5258 	int i;
5259 	uint16_t first_queue;
5260 	enum i40e_status_code code;
5261 
5262 	hw = &pf->hw;
5263 	vsi = &pf->vsi;
5264 
5265 	vsi_ctx.pf_num = hw->pf_id;
5266 	vsi_ctx.uplink_seid = pf->veb_seid;
5267 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5268 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5269 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5270 
5271 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5272 
5273 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5274 	vsi_ctx.info.switch_id = htole16(0);
5275 
5276 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5277 	vsi_ctx.info.sec_flags = 0;
5278 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5279 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5280 
5281 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5282 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5283 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5284 
5285 	vsi_ctx.info.valid_sections |=
5286 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5287 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
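	/* VF queues are carved out of the global queue space, after the PF's. */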
5288 	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5289 	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5290 		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
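	/* Mark the remaining queue-map entries as unused. */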
5291 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5292 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5293 
5294 	vsi_ctx.info.tc_mapping[0] = htole16(
5295 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5296 	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5297 
5298 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5299 	if (code != I40E_SUCCESS)
5300 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5301 	vf->vsi.seid = vsi_ctx.seid;
5302 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5303 	vf->vsi.first_queue = first_queue;
5304 	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5305 
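	/*
	 * Fetch the parameters the firmware assigned to the new VSI; the
	 * qs_handle in particular is needed later to set up TX queue
	 * contexts.
	 */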
5306 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5307 	if (code != I40E_SUCCESS)
5308 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5309 
5310 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5311 	if (code != I40E_SUCCESS) {
5312 		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5313 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5314 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5315 	}
5316 
5317 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5318 	return (0);
5319 }
5320 
5321 static int
5322 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5323 {
5324 	struct i40e_hw *hw;
5325 	int error;
5326 
5327 	hw = &pf->hw;
5328 
5329 	error = ixl_vf_alloc_vsi(pf, vf);
5330 	if (error != 0)
5331 		return (error);
5332 
5333 	vf->vsi.hw_filters_add = 0;
5334 	vf->vsi.hw_filters_del = 0;
5335 	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5336 	ixl_reconfigure_filters(&vf->vsi);
5337 
5338 	return (0);
5339 }
5340 
5341 static void
5342 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5343     uint32_t val)
5344 {
5345 	uint32_t qtable;
5346 	int index, shift;
5347 
5348 	/*
5349 	 * Two queues are mapped in a single register, so we have to do some
5350 	 * gymnastics to convert the queue number into a register index and
5351 	 * shift.
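	 * For example, qnum 5 lands in QTABLE register index 2 (5 / 2) and is
	 * shifted into the upper (QINDEX_1) half; even queue numbers use the
	 * lower (QINDEX_0) half.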
5352 	 */
5353 	index = qnum / 2;
5354 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5355 
5356 	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5357 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5358 	qtable |= val << shift;
5359 	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5360 }
5361 
5362 static void
5363 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5364 {
5365 	struct i40e_hw *hw;
5366 	uint32_t qtable;
5367 	int i;
5368 
5369 	hw = &pf->hw;
5370 
5371 	/*
5372 	 * Contiguous mappings aren't actually supported by the hardware,
5373 	 * so we have to use non-contiguous mappings.
5374 	 */
5375 	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5376 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5377 
5378 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5379 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5380 
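	/* Tell the VF which global queues it owns via its VPLAN_QTABLE. */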
5381 	for (i = 0; i < vf->vsi.num_queues; i++) {
5382 		qtable = (vf->vsi.first_queue + i) <<
5383 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5384 
5385 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5386 	}
5387 
5388 	/* Map queues allocated to VF to its VSI. */
5389 	for (i = 0; i < vf->vsi.num_queues; i++)
5390 		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5391 
5392 	/* Set rest of VSI queues as unused. */
5393 	for (; i < IXL_MAX_VSI_QUEUES; i++)
5394 		ixl_vf_map_vsi_queue(hw, vf, i,
5395 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5396 
5397 	ixl_flush(hw);
5398 }
5399 
5400 static void
5401 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5402 {
5403 	struct i40e_hw *hw;
5404 
5405 	hw = &pf->hw;
5406 
5407 	if (vsi->seid == 0)
5408 		return;
5409 
5410 	i40e_aq_delete_element(hw, vsi->seid, NULL);
5411 }
5412 
5413 static void
5414 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5415 {
5416 
5417 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5418 	ixl_flush(hw);
5419 }
5420 
5421 static void
5422 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5423 {
5424 
5425 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5426 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5427 	ixl_flush(hw);
5428 }
5429 
5430 static void
5431 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5432 {
5433 	struct i40e_hw *hw;
5434 	uint32_t vfint_reg, vpint_reg;
5435 	int i;
5436 
5437 	hw = &pf->hw;
5438 
5439 	ixl_vf_vsi_release(pf, &vf->vsi);
5440 
5441 	/* Index 0 has a special register. */
5442 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5443 
5444 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5446 		ixl_vf_disable_queue_intr(hw, vfint_reg);
5447 	}
5448 
5449 	/* Index 0 has a special register. */
5450 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5451 
5452 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5453 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5454 		ixl_vf_unregister_intr(hw, vpint_reg);
5455 	}
5456 
5457 	vf->vsi.num_queues = 0;
5458 }
5459 
5460 static int
5461 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5462 {
5463 	struct i40e_hw *hw;
5464 	int i;
5465 	uint16_t global_vf_num;
5466 	uint32_t ciad;
5467 
5468 	hw = &pf->hw;
5469 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5470 
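	/*
	 * Use the PF's indirect PCI configuration access registers (CIAA and
	 * CIAD) to poll the VF's PCI Device Status register until it reports
	 * no pending transactions, or until we time out.
	 */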
5471 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5472 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5473 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5474 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5475 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5476 			return (0);
5477 		DELAY(1);
5478 	}
5479 
5480 	return (ETIMEDOUT);
5481 }
5482 
5483 static void
5484 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5485 {
5486 	struct i40e_hw *hw;
5487 	uint32_t vfrtrig;
5488 
5489 	hw = &pf->hw;
5490 
5491 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5492 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5493 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5494 	ixl_flush(hw);
5495 
5496 	ixl_reinit_vf(pf, vf);
5497 }
5498 
5499 static void
5500 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5501 {
5502 	struct i40e_hw *hw;
5503 	uint32_t vfrstat, vfrtrig;
5504 	int i, error;
5505 
5506 	hw = &pf->hw;
5507 
5508 	error = ixl_flush_pcie(pf, vf);
5509 	if (error != 0)
5510 		device_printf(pf->dev,
5511 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5512 		    vf->vf_num);
5513 
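	/*
	 * Poll VPGEN_VFRSTAT until the device reports that the VF reset is
	 * complete.
	 */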
5514 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5515 		DELAY(10);
5516 
5517 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5518 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5519 			break;
5520 	}
5521 
5522 	if (i == IXL_VF_RESET_TIMEOUT)
5523 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5524 
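	/* Report reset completion to the VF, then release the reset trigger. */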
5525 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5526 
5527 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5528 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5529 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5530 
5531 	if (vf->vsi.seid != 0)
5532 		ixl_disable_rings(&vf->vsi);
5533 
5534 	ixl_vf_release_resources(pf, vf);
5535 	ixl_vf_setup_vsi(pf, vf);
5536 	ixl_vf_map_queues(pf, vf);
5537 
5538 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5539 	ixl_flush(hw);
5540 }
5541 
5542 static const char *
5543 ixl_vc_opcode_str(uint16_t op)
5544 {
5545 
5546 	switch (op) {
5547 	case I40E_VIRTCHNL_OP_VERSION:
5548 		return ("VERSION");
5549 	case I40E_VIRTCHNL_OP_RESET_VF:
5550 		return ("RESET_VF");
5551 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5552 		return ("GET_VF_RESOURCES");
5553 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5554 		return ("CONFIG_TX_QUEUE");
5555 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5556 		return ("CONFIG_RX_QUEUE");
5557 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5558 		return ("CONFIG_VSI_QUEUES");
5559 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5560 		return ("CONFIG_IRQ_MAP");
5561 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5562 		return ("ENABLE_QUEUES");
5563 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5564 		return ("DISABLE_QUEUES");
5565 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5566 		return ("ADD_ETHER_ADDRESS");
5567 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5568 		return ("DEL_ETHER_ADDRESS");
5569 	case I40E_VIRTCHNL_OP_ADD_VLAN:
5570 		return ("ADD_VLAN");
5571 	case I40E_VIRTCHNL_OP_DEL_VLAN:
5572 		return ("DEL_VLAN");
5573 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5574 		return ("CONFIG_PROMISCUOUS_MODE");
5575 	case I40E_VIRTCHNL_OP_GET_STATS:
5576 		return ("GET_STATS");
5577 	case I40E_VIRTCHNL_OP_FCOE:
5578 		return ("FCOE");
5579 	case I40E_VIRTCHNL_OP_EVENT:
5580 		return ("EVENT");
5581 	default:
5582 		return ("UNKNOWN");
5583 	}
5584 }
5585 
5586 static int
5587 ixl_vc_opcode_level(uint16_t opcode)
5588 {
5589 
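	/*
	 * GET_STATS is polled frequently by VFs, so only log it at a more
	 * verbose debug level than the other opcodes.
	 */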
5590 	switch (opcode) {
5591 	case I40E_VIRTCHNL_OP_GET_STATS:
5592 		return (10);
5593 	default:
5594 		return (5);
5595 	}
5596 }
5597 
5598 static void
5599 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5600     enum i40e_status_code status, void *msg, uint16_t len)
5601 {
5602 	struct i40e_hw *hw;
5603 	int global_vf_id;
5604 
5605 	hw = &pf->hw;
5606 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5607 
5608 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5609 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5610 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5611 
5612 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5613 }
5614 
5615 static void
5616 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5617 {
5618 
5619 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5620 }
5621 
5622 static void
5623 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5624     enum i40e_status_code status, const char *file, int line)
5625 {
5626 
5627 	I40E_VC_DEBUG(pf, 1,
5628 	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5629 	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5630 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5631 }
5632 
5633 static void
5634 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5635     uint16_t msg_size)
5636 {
5637 	struct i40e_virtchnl_version_info reply;
5638 
5639 	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5640 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5641 		    I40E_ERR_PARAM);
5642 		return;
5643 	}
5644 
5645 	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5646 	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5647 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5648 	    sizeof(reply));
5649 }
5650 
5651 static void
5652 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5653     uint16_t msg_size)
5654 {
5655 
5656 	if (msg_size != 0) {
5657 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5658 		    I40E_ERR_PARAM);
5659 		return;
5660 	}
5661 
5662 	ixl_reset_vf(pf, vf);
5663 
5664 	/* No response to a reset message. */
5665 }
5666 
5667 static void
5668 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5669     uint16_t msg_size)
5670 {
5671 	struct i40e_virtchnl_vf_resource reply;
5672 
5673 	if (msg_size != 0) {
5674 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5675 		    I40E_ERR_PARAM);
5676 		return;
5677 	}
5678 
5679 	bzero(&reply, sizeof(reply));
5680 
5681 	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5682 
5683 	reply.num_vsis = 1;
5684 	reply.num_queue_pairs = vf->vsi.num_queues;
5685 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5686 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5687 	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5688 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5689 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5690 
5691 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5692 	    I40E_SUCCESS, &reply, sizeof(reply));
5693 }
5694 
5695 static int
5696 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5697     struct i40e_virtchnl_txq_info *info)
5698 {
5699 	struct i40e_hw *hw;
5700 	struct i40e_hmc_obj_txq txq;
5701 	uint16_t global_queue_num, global_vf_num;
5702 	enum i40e_status_code status;
5703 	uint32_t qtx_ctl;
5704 
5705 	hw = &pf->hw;
5706 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5707 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5708 	bzero(&txq, sizeof(txq));
5709 
5710 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5711 	if (status != I40E_SUCCESS)
5712 		return (EINVAL);
5713 
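	/*
	 * The queue context stores the ring base address in units of
	 * IXL_TX_CTX_BASE_UNITS bytes, hence the division.
	 */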
5714 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5715 
5716 	txq.head_wb_ena = info->headwb_enabled;
5717 	txq.head_wb_addr = info->dma_headwb_addr;
5718 	txq.qlen = info->ring_len;
5719 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5720 	txq.rdylist_act = 0;
5721 
5722 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5723 	if (status != I40E_SUCCESS)
5724 		return (EINVAL);
5725 
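	/* Mark the global queue as VF-owned in its QTX_CTL register. */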
5726 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5727 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5728 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5729 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5730 	ixl_flush(hw);
5731 
5732 	return (0);
5733 }
5734 
5735 static int
5736 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5737     struct i40e_virtchnl_rxq_info *info)
5738 {
5739 	struct i40e_hw *hw;
5740 	struct i40e_hmc_obj_rxq rxq;
5741 	uint16_t global_queue_num;
5742 	enum i40e_status_code status;
5743 
5744 	hw = &pf->hw;
5745 	global_queue_num = vf->vsi.first_queue + info->queue_id;
5746 	bzero(&rxq, sizeof(rxq));
5747 
5748 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5749 		return (EINVAL);
5750 
5751 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5752 	    info->max_pkt_size < ETHER_MIN_LEN)
5753 		return (EINVAL);
5754 
5755 	if (info->splithdr_enabled) {
5756 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5757 			return (EINVAL);
5758 
5759 		rxq.hsplit_0 = info->rx_split_pos &
5760 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5761 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5762 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5763 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5764 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5765 
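		/*
		 * A non-zero descriptor type selects the split descriptor
		 * format; 2 is assumed to be "split always" per the
		 * datasheet's RX queue context definition.
		 */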
5766 		rxq.dtype = 2;
5767 	}
5768 
5769 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5770 	if (status != I40E_SUCCESS)
5771 		return (EINVAL);
5772 
5773 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5774 	rxq.qlen = info->ring_len;
5775 
5776 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5777 
5778 	rxq.dsize = 1;
5779 	rxq.crcstrip = 1;
5780 	rxq.l2tsel = 1;
5781 
5782 	rxq.rxmax = info->max_pkt_size;
5783 	rxq.tphrdesc_ena = 1;
5784 	rxq.tphwdesc_ena = 1;
5785 	rxq.tphdata_ena = 1;
5786 	rxq.tphhead_ena = 1;
5787 	rxq.lrxqthresh = 2;
5788 	rxq.prefena = 1;
5789 
5790 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5791 	if (status != I40E_SUCCESS)
5792 		return (EINVAL);
5793 
5794 	return (0);
5795 }
5796 
5797 static void
5798 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5799     uint16_t msg_size)
5800 {
5801 	struct i40e_virtchnl_vsi_queue_config_info *info;
5802 	struct i40e_virtchnl_queue_pair_info *pair;
5803 	int i;
5804 
5805 	if (msg_size < sizeof(*info)) {
5806 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5807 		    I40E_ERR_PARAM);
5808 		return;
5809 	}
5810 
5811 	info = msg;
5812 	if (info->num_queue_pairs == 0) {
5813 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5814 		    I40E_ERR_PARAM);
5815 		return;
5816 	}
5817 
5818 	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5819 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5820 		    I40E_ERR_PARAM);
5821 		return;
5822 	}
5823 
5824 	if (info->vsi_id != vf->vsi.vsi_num) {
5825 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5826 		    I40E_ERR_PARAM);
5827 		return;
5828 	}
5829 
5830 	for (i = 0; i < info->num_queue_pairs; i++) {
5831 		pair = &info->qpair[i];
5832 
5833 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5834 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5835 		    pair->txq.queue_id != pair->rxq.queue_id ||
5836 		    pair->txq.queue_id >= vf->vsi.num_queues) {
5837 
5838 			i40e_send_vf_nack(pf, vf,
5839 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5840 			return;
5841 		}
5842 
5843 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5844 			i40e_send_vf_nack(pf, vf,
5845 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5846 			return;
5847 		}
5848 
5849 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5850 			i40e_send_vf_nack(pf, vf,
5851 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5852 			return;
5853 		}
5854 	}
5855 
5856 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5857 }
5858 
5859 static void
5860 ixl_vf_set_qctl(struct ixl_pf *pf,
5861     const struct i40e_virtchnl_vector_map *vector,
5862     enum i40e_queue_type cur_type, uint16_t cur_queue,
5863     enum i40e_queue_type *last_type, uint16_t *last_queue)
5864 {
5865 	uint32_t offset, qctl;
5866 	uint16_t itr_indx;
5867 
5868 	if (cur_type == I40E_QUEUE_TYPE_RX) {
5869 		offset = I40E_QINT_RQCTL(cur_queue);
5870 		itr_indx = vector->rxitr_idx;
5871 	} else {
5872 		offset = I40E_QINT_TQCTL(cur_queue);
5873 		itr_indx = vector->txitr_idx;
5874 	}
5875 
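	/*
	 * The RX and TX queue-interrupt control registers share the same
	 * field layout, so the RQCTL shift and mask definitions are used for
	 * both here.
	 */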
5876 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5877 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5878 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5879 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5880 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5881 
5882 	wr32(&pf->hw, offset, qctl);
5883 
5884 	*last_type = cur_type;
5885 	*last_queue = cur_queue;
5886 }
5887 
5888 static void
5889 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5890     const struct i40e_virtchnl_vector_map *vector)
5891 {
5892 	struct i40e_hw *hw;
5893 	u_int qindex;
5894 	enum i40e_queue_type type, last_type;
5895 	uint32_t lnklst_reg;
5896 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5897 
5898 	hw = &pf->hw;
5899 
5900 	rxq_map = vector->rxq_map;
5901 	txq_map = vector->txq_map;
5902 
5903 	last_queue = IXL_END_OF_INTR_LNKLST;
5904 	last_type = I40E_QUEUE_TYPE_RX;
5905 
	/*
	 * The datasheet says that, to optimize performance, RX and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
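	 * For example, with rxq_map = txq_map = 0x3 the chain ends up as
	 * RX1 -> TX1 -> RX0 -> TX0: the LNKLST register points at the queue
	 * processed last, and each queue's QINT_[RT]QCTL entry points at the
	 * queue processed before it.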
5910 	 */
5911 	while ((rxq_map != 0) || (txq_map != 0)) {
5912 		if (txq_map != 0) {
5913 			qindex = ffs(txq_map) - 1;
5914 			type = I40E_QUEUE_TYPE_TX;
5915 			cur_queue = vf->vsi.first_queue + qindex;
5916 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5917 			    &last_type, &last_queue);
5918 			txq_map &= ~(1 << qindex);
5919 		}
5920 
5921 		if (rxq_map != 0) {
5922 			qindex = ffs(rxq_map) - 1;
5923 			type = I40E_QUEUE_TYPE_RX;
5924 			cur_queue = vf->vsi.first_queue + qindex;
5925 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5926 			    &last_type, &last_queue);
5927 			rxq_map &= ~(1 << qindex);
5928 		}
5929 	}
5930 
5931 	if (vector->vector_id == 0)
5932 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5933 	else
5934 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5935 		    vf->vf_num);
5936 	wr32(hw, lnklst_reg,
5937 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5938 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5939 
5940 	ixl_flush(hw);
5941 }
5942 
5943 static void
5944 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5945     uint16_t msg_size)
5946 {
5947 	struct i40e_virtchnl_irq_map_info *map;
5948 	struct i40e_virtchnl_vector_map *vector;
5949 	struct i40e_hw *hw;
5950 	int i, largest_txq, largest_rxq;
5951 
5952 	hw = &pf->hw;
5953 
5954 	if (msg_size < sizeof(*map)) {
5955 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5956 		    I40E_ERR_PARAM);
5957 		return;
5958 	}
5959 
5960 	map = msg;
5961 	if (map->num_vectors == 0) {
5962 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5963 		    I40E_ERR_PARAM);
5964 		return;
5965 	}
5966 
5967 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5968 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5969 		    I40E_ERR_PARAM);
5970 		return;
5971 	}
5972 
5973 	for (i = 0; i < map->num_vectors; i++) {
5974 		vector = &map->vecmap[i];
5975 
5976 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5977 		    vector->vsi_id != vf->vsi.vsi_num) {
5978 			i40e_send_vf_nack(pf, vf,
5979 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5980 			return;
5981 		}
5982 
5983 		if (vector->rxq_map != 0) {
5984 			largest_rxq = fls(vector->rxq_map) - 1;
5985 			if (largest_rxq >= vf->vsi.num_queues) {
5986 				i40e_send_vf_nack(pf, vf,
5987 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5988 				    I40E_ERR_PARAM);
5989 				return;
5990 			}
5991 		}
5992 
5993 		if (vector->txq_map != 0) {
5994 			largest_txq = fls(vector->txq_map) - 1;
5995 			if (largest_txq >= vf->vsi.num_queues) {
5996 				i40e_send_vf_nack(pf, vf,
5997 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5998 				    I40E_ERR_PARAM);
5999 				return;
6000 			}
6001 		}
6002 
6003 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6004 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
6005 			i40e_send_vf_nack(pf, vf,
6006 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6007 			    I40E_ERR_PARAM);
6008 			return;
6009 		}
6010 
6011 		ixl_vf_config_vector(pf, vf, vector);
6012 	}
6013 
6014 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6015 }
6016 
6017 static void
6018 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6019     uint16_t msg_size)
6020 {
6021 	struct i40e_virtchnl_queue_select *select;
6022 	int error;
6023 
6024 	if (msg_size != sizeof(*select)) {
6025 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6026 		    I40E_ERR_PARAM);
6027 		return;
6028 	}
6029 
6030 	select = msg;
6031 	if (select->vsi_id != vf->vsi.vsi_num ||
6032 	    select->rx_queues == 0 || select->tx_queues == 0) {
6033 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6034 		    I40E_ERR_PARAM);
6035 		return;
6036 	}
6037 
6038 	error = ixl_enable_rings(&vf->vsi);
6039 	if (error) {
6040 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6041 		    I40E_ERR_TIMEOUT);
6042 		return;
6043 	}
6044 
6045 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6046 }
6047 
6048 static void
6049 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6050     void *msg, uint16_t msg_size)
6051 {
6052 	struct i40e_virtchnl_queue_select *select;
6053 	int error;
6054 
6055 	if (msg_size != sizeof(*select)) {
6056 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6057 		    I40E_ERR_PARAM);
6058 		return;
6059 	}
6060 
6061 	select = msg;
6062 	if (select->vsi_id != vf->vsi.vsi_num ||
6063 	    select->rx_queues == 0 || select->tx_queues == 0) {
6064 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6065 		    I40E_ERR_PARAM);
6066 		return;
6067 	}
6068 
6069 	error = ixl_disable_rings(&vf->vsi);
6070 	if (error) {
6071 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6072 		    I40E_ERR_TIMEOUT);
6073 		return;
6074 	}
6075 
6076 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6077 }
6078 
6079 static boolean_t
6080 ixl_zero_mac(const uint8_t *addr)
6081 {
6082 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6083 
6084 	return (cmp_etheraddr(addr, zero));
6085 }
6086 
6087 static boolean_t
6088 ixl_bcast_mac(const uint8_t *addr)
6089 {
6090 
6091 	return (cmp_etheraddr(addr, ixl_bcast_addr));
6092 }
6093 
6094 static int
6095 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6096 {
6097 
6098 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6099 		return (EINVAL);
6100 
6101 	/*
6102 	 * If the VF is not allowed to change its MAC address, don't let it
6103 	 * set a MAC filter for an address that is not a multicast address and
6104 	 * is not its assigned MAC.
6105 	 */
6106 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6107 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6108 		return (EPERM);
6109 
6110 	return (0);
6111 }
6112 
6113 static void
6114 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6115     uint16_t msg_size)
6116 {
6117 	struct i40e_virtchnl_ether_addr_list *addr_list;
6118 	struct i40e_virtchnl_ether_addr *addr;
6119 	struct ixl_vsi *vsi;
6120 	int i;
6121 	size_t expected_size;
6122 
6123 	vsi = &vf->vsi;
6124 
6125 	if (msg_size < sizeof(*addr_list)) {
6126 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6127 		    I40E_ERR_PARAM);
6128 		return;
6129 	}
6130 
6131 	addr_list = msg;
6132 	expected_size = sizeof(*addr_list) +
6133 	    addr_list->num_elements * sizeof(*addr);
6134 
6135 	if (addr_list->num_elements == 0 ||
6136 	    addr_list->vsi_id != vsi->vsi_num ||
6137 	    msg_size != expected_size) {
6138 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6139 		    I40E_ERR_PARAM);
6140 		return;
6141 	}
6142 
6143 	for (i = 0; i < addr_list->num_elements; i++) {
6144 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6145 			i40e_send_vf_nack(pf, vf,
6146 			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6147 			return;
6148 		}
6149 	}
6150 
6151 	for (i = 0; i < addr_list->num_elements; i++) {
6152 		addr = &addr_list->list[i];
6153 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6154 	}
6155 
6156 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6157 }
6158 
6159 static void
6160 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6161     uint16_t msg_size)
6162 {
6163 	struct i40e_virtchnl_ether_addr_list *addr_list;
6164 	struct i40e_virtchnl_ether_addr *addr;
6165 	size_t expected_size;
6166 	int i;
6167 
6168 	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6170 		    I40E_ERR_PARAM);
6171 		return;
6172 	}
6173 
6174 	addr_list = msg;
6175 	expected_size = sizeof(*addr_list) +
6176 	    addr_list->num_elements * sizeof(*addr);
6177 
6178 	if (addr_list->num_elements == 0 ||
6179 	    addr_list->vsi_id != vf->vsi.vsi_num ||
6180 	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6182 		    I40E_ERR_PARAM);
6183 		return;
6184 	}
6185 
6186 	for (i = 0; i < addr_list->num_elements; i++) {
6187 		addr = &addr_list->list[i];
6188 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6189 			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6191 			return;
6192 		}
6193 	}
6194 
6195 	for (i = 0; i < addr_list->num_elements; i++) {
6196 		addr = &addr_list->list[i];
6197 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6198 	}
6199 
6200 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6201 }
6202 
6203 static enum i40e_status_code
6204 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6205 {
6206 	struct i40e_vsi_context vsi_ctx;
6207 
6208 	vsi_ctx.seid = vf->vsi.seid;
6209 
6210 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6211 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6212 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6213 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6214 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6215 }
6216 
6217 static void
6218 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6219     uint16_t msg_size)
6220 {
6221 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6222 	enum i40e_status_code code;
6223 	size_t expected_size;
6224 	int i;
6225 
6226 	if (msg_size < sizeof(*filter_list)) {
6227 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6228 		    I40E_ERR_PARAM);
6229 		return;
6230 	}
6231 
6232 	filter_list = msg;
6233 	expected_size = sizeof(*filter_list) +
6234 	    filter_list->num_elements * sizeof(uint16_t);
6235 	if (filter_list->num_elements == 0 ||
6236 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6237 	    msg_size != expected_size) {
6238 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6239 		    I40E_ERR_PARAM);
6240 		return;
6241 	}
6242 
6243 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6244 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6245 		    I40E_ERR_PARAM);
6246 		return;
6247 	}
6248 
6249 	for (i = 0; i < filter_list->num_elements; i++) {
6250 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6251 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6252 			    I40E_ERR_PARAM);
6253 			return;
6254 		}
6255 	}
6256 
	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}
6262 
6263 	for (i = 0; i < filter_list->num_elements; i++)
6264 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6265 
6266 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6267 }
6268 
6269 static void
6270 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6271     uint16_t msg_size)
6272 {
6273 	struct i40e_virtchnl_vlan_filter_list *filter_list;
6274 	int i;
6275 	size_t expected_size;
6276 
6277 	if (msg_size < sizeof(*filter_list)) {
6278 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6279 		    I40E_ERR_PARAM);
6280 		return;
6281 	}
6282 
6283 	filter_list = msg;
6284 	expected_size = sizeof(*filter_list) +
6285 	    filter_list->num_elements * sizeof(uint16_t);
6286 	if (filter_list->num_elements == 0 ||
6287 	    filter_list->vsi_id != vf->vsi.vsi_num ||
6288 	    msg_size != expected_size) {
6289 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6290 		    I40E_ERR_PARAM);
6291 		return;
6292 	}
6293 
6294 	for (i = 0; i < filter_list->num_elements; i++) {
6295 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6297 			    I40E_ERR_PARAM);
6298 			return;
6299 		}
6300 	}
6301 
6302 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6304 		    I40E_ERR_PARAM);
6305 		return;
6306 	}
6307 
6308 	for (i = 0; i < filter_list->num_elements; i++)
6309 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6310 
6311 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6312 }
6313 
6314 static void
6315 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6316     void *msg, uint16_t msg_size)
6317 {
6318 	struct i40e_virtchnl_promisc_info *info;
6319 	enum i40e_status_code code;
6320 
6321 	if (msg_size != sizeof(*info)) {
6322 		i40e_send_vf_nack(pf, vf,
6323 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6324 		return;
6325 	}
6326 
	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6328 		i40e_send_vf_nack(pf, vf,
6329 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6330 		return;
6331 	}
6332 
6333 	info = msg;
6334 	if (info->vsi_id != vf->vsi.vsi_num) {
6335 		i40e_send_vf_nack(pf, vf,
6336 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6337 		return;
6338 	}
6339 
6340 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6341 	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6342 	if (code != I40E_SUCCESS) {
6343 		i40e_send_vf_nack(pf, vf,
6344 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6345 		return;
6346 	}
6347 
6348 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6349 	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6350 	if (code != I40E_SUCCESS) {
6351 		i40e_send_vf_nack(pf, vf,
6352 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6353 		return;
6354 	}
6355 
6356 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6357 }
6358 
6359 static void
6360 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6361     uint16_t msg_size)
6362 {
6363 	struct i40e_virtchnl_queue_select *queue;
6364 
6365 	if (msg_size != sizeof(*queue)) {
6366 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6367 		    I40E_ERR_PARAM);
6368 		return;
6369 	}
6370 
6371 	queue = msg;
6372 	if (queue->vsi_id != vf->vsi.vsi_num) {
6373 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6374 		    I40E_ERR_PARAM);
6375 		return;
6376 	}
6377 
6378 	ixl_update_eth_stats(&vf->vsi);
6379 
6380 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6381 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6382 }
6383 
6384 static void
6385 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6386 {
6387 	struct ixl_vf *vf;
6388 	void *msg;
6389 	uint16_t vf_num, msg_size;
6390 	uint32_t opcode;
6391 
6392 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6393 	opcode = le32toh(event->desc.cookie_high);
6394 
6395 	if (vf_num >= pf->num_vfs) {
6396 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6397 		return;
6398 	}
6399 
6400 	vf = &pf->vfs[vf_num];
6401 	msg = event->msg_buf;
6402 	msg_size = event->msg_len;
6403 
6404 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6405 	    "Got msg %s(%d) from VF-%d of size %d\n",
6406 	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6407 
6408 	switch (opcode) {
6409 	case I40E_VIRTCHNL_OP_VERSION:
6410 		ixl_vf_version_msg(pf, vf, msg, msg_size);
6411 		break;
6412 	case I40E_VIRTCHNL_OP_RESET_VF:
6413 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6414 		break;
6415 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6416 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6417 		break;
6418 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6419 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6420 		break;
6421 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6422 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6423 		break;
6424 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6425 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6426 		break;
6427 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6428 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6429 		break;
6430 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6431 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6432 		break;
6433 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6434 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6435 		break;
6436 	case I40E_VIRTCHNL_OP_ADD_VLAN:
6437 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6438 		break;
6439 	case I40E_VIRTCHNL_OP_DEL_VLAN:
6440 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6441 		break;
6442 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6443 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6444 		break;
6445 	case I40E_VIRTCHNL_OP_GET_STATS:
6446 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6447 		break;
6448 
6449 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6450 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6451 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6452 	default:
6453 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6454 		break;
6455 	}
6456 }
6457 
/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6459 static void
6460 ixl_handle_vflr(void *arg, int pending)
6461 {
6462 	struct ixl_pf *pf;
6463 	struct i40e_hw *hw;
6464 	uint16_t global_vf_num;
6465 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6466 	int i;
6467 
6468 	pf = arg;
6469 	hw = &pf->hw;
6470 
6471 	IXL_PF_LOCK(pf);
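	/*
	 * Check each VF's bit in GLGEN_VFLRSTAT; a set bit means that VF was
	 * reset via FLR, so acknowledge the event and reinitialize the VF.
	 */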
6472 	for (i = 0; i < pf->num_vfs; i++) {
6473 		global_vf_num = hw->func_caps.vf_base_id + i;
6474 
6475 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6476 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6477 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6478 		if (vflrstat & vflrstat_mask) {
6479 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6480 			    vflrstat_mask);
6481 
6482 			ixl_reinit_vf(pf, &pf->vfs[i]);
6483 		}
6484 	}
6485 
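	/* Re-enable the VFLR interrupt cause so future FLRs are reported. */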
6486 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6487 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6488 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6489 	ixl_flush(hw);
6490 
6491 	IXL_PF_UNLOCK(pf);
6492 }
6493 
6494 static int
6495 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6496 {
6497 
6498 	switch (err) {
6499 	case I40E_AQ_RC_EPERM:
6500 		return (EPERM);
6501 	case I40E_AQ_RC_ENOENT:
6502 		return (ENOENT);
6503 	case I40E_AQ_RC_ESRCH:
6504 		return (ESRCH);
6505 	case I40E_AQ_RC_EINTR:
6506 		return (EINTR);
6507 	case I40E_AQ_RC_EIO:
6508 		return (EIO);
6509 	case I40E_AQ_RC_ENXIO:
6510 		return (ENXIO);
6511 	case I40E_AQ_RC_E2BIG:
6512 		return (E2BIG);
6513 	case I40E_AQ_RC_EAGAIN:
6514 		return (EAGAIN);
6515 	case I40E_AQ_RC_ENOMEM:
6516 		return (ENOMEM);
6517 	case I40E_AQ_RC_EACCES:
6518 		return (EACCES);
6519 	case I40E_AQ_RC_EFAULT:
6520 		return (EFAULT);
6521 	case I40E_AQ_RC_EBUSY:
6522 		return (EBUSY);
6523 	case I40E_AQ_RC_EEXIST:
6524 		return (EEXIST);
6525 	case I40E_AQ_RC_EINVAL:
6526 		return (EINVAL);
6527 	case I40E_AQ_RC_ENOTTY:
6528 		return (ENOTTY);
6529 	case I40E_AQ_RC_ENOSPC:
6530 		return (ENOSPC);
6531 	case I40E_AQ_RC_ENOSYS:
6532 		return (ENOSYS);
6533 	case I40E_AQ_RC_ERANGE:
6534 		return (ERANGE);
6535 	case I40E_AQ_RC_EFLUSHED:
6536 		return (EINVAL);	/* No exact equivalent in errno.h */
6537 	case I40E_AQ_RC_BAD_ADDR:
6538 		return (EFAULT);
6539 	case I40E_AQ_RC_EMODE:
6540 		return (EPERM);
6541 	case I40E_AQ_RC_EFBIG:
6542 		return (EFBIG);
6543 	default:
6544 		return (EINVAL);
6545 	}
6546 }
6547 
6548 static int
6549 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6550 {
6551 	struct ixl_pf *pf;
6552 	struct i40e_hw *hw;
6553 	struct ixl_vsi *pf_vsi;
6554 	enum i40e_status_code ret;
6555 	int i, error;
6556 
6557 	pf = device_get_softc(dev);
6558 	hw = &pf->hw;
6559 	pf_vsi = &pf->vsi;
6560 
6561 	IXL_PF_LOCK(pf);
6562 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6563 	    M_ZERO);
6564 
6565 	if (pf->vfs == NULL) {
6566 		error = ENOMEM;
6567 		goto fail;
6568 	}
6569 
6570 	for (i = 0; i < num_vfs; i++)
6571 		sysctl_ctx_init(&pf->vfs[i].ctx);
6572 
6573 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6574 	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6575 	if (ret != I40E_SUCCESS) {
6576 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
		    error);
6579 		goto fail;
6580 	}
6581 
6582 	ixl_configure_msix(pf);
6583 	ixl_enable_adminq(hw);
6584 
6585 	pf->num_vfs = num_vfs;
6586 	IXL_PF_UNLOCK(pf);
6587 	return (0);
6588 
6589 fail:
6590 	free(pf->vfs, M_IXL);
6591 	pf->vfs = NULL;
6592 	IXL_PF_UNLOCK(pf);
6593 	return (error);
6594 }
6595 
6596 static void
6597 ixl_iov_uninit(device_t dev)
6598 {
6599 	struct ixl_pf *pf;
6600 	struct i40e_hw *hw;
6601 	struct ixl_vsi *vsi;
6602 	struct ifnet *ifp;
6603 	struct ixl_vf *vfs;
6604 	int i, num_vfs;
6605 
6606 	pf = device_get_softc(dev);
6607 	hw = &pf->hw;
6608 	vsi = &pf->vsi;
6609 	ifp = vsi->ifp;
6610 
6611 	IXL_PF_LOCK(pf);
6612 	for (i = 0; i < pf->num_vfs; i++) {
6613 		if (pf->vfs[i].vsi.seid != 0)
6614 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6615 	}
6616 
6617 	if (pf->veb_seid != 0) {
6618 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6619 		pf->veb_seid = 0;
6620 	}
6621 
6622 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6623 		ixl_disable_intr(vsi);
6624 
6625 	vfs = pf->vfs;
6626 	num_vfs = pf->num_vfs;
6627 
6628 	pf->vfs = NULL;
6629 	pf->num_vfs = 0;
6630 	IXL_PF_UNLOCK(pf);
6631 
6632 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6633 	for (i = 0; i < num_vfs; i++)
6634 		sysctl_ctx_free(&vfs[i].ctx);
6635 	free(vfs, M_IXL);
6636 }
6637 
6638 static int
6639 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6640 {
6641 	char sysctl_name[QUEUE_NAME_LEN];
6642 	struct ixl_pf *pf;
6643 	struct ixl_vf *vf;
6644 	const void *mac;
6645 	size_t size;
6646 	int error;
6647 
6648 	pf = device_get_softc(dev);
6649 	vf = &pf->vfs[vfnum];
6650 
6651 	IXL_PF_LOCK(pf);
6652 	vf->vf_num = vfnum;
6653 
6654 	vf->vsi.back = pf;
6655 	vf->vf_flags = VF_FLAG_ENABLED;
6656 	SLIST_INIT(&vf->vsi.ftl);
6657 
6658 	error = ixl_vf_setup_vsi(pf, vf);
6659 	if (error != 0)
6660 		goto out;
6661 
6662 	if (nvlist_exists_binary(params, "mac-addr")) {
6663 		mac = nvlist_get_binary(params, "mac-addr", &size);
6664 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6665 
6666 		if (nvlist_get_bool(params, "allow-set-mac"))
6667 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else {
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	}
6674 
6675 	if (nvlist_get_bool(params, "mac-anti-spoof"))
6676 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6677 
6678 	if (nvlist_get_bool(params, "allow-promisc"))
6679 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6680 
6681 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6682 
6683 	ixl_reset_vf(pf, vf);
6684 out:
6685 	IXL_PF_UNLOCK(pf);
6686 	if (error == 0) {
6687 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6688 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6689 	}
6690 
6691 	return (error);
6692 }
6693 #endif /* PCI_IOV */
6694