xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 3823d5e198425b4f5e5a80267d195769d1063773)
/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.2";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to load on.
 *  The last field stores an index into ixl_strings.
 *  The last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_setup_vsi(struct ixl_vsi *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI-X and legacy interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

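/*
** Illustrative note (not part of the original source): with the
** DRIVER_MODULE/MODULE_DEPEND hooks above, this driver is normally
** built as the if_ixl kernel module, so it can be loaded at runtime
** with kldload(8) (e.g. `kldload if_ixl`) or at boot from
** /boot/loader.conf with if_ixl_load="YES".
*/
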
/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0 the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

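/*
** Example usage (illustrative, assuming standard FreeBSD tunable
** semantics): the CTLFLAG_RDTUN sysctls above are read once at
** module load, so they are typically set in /boot/loader.conf, e.g.:
**
**   hw.ixl.enable_msix=0
**   hw.ixl.ringsz=2048
**   hw.ixl.max_queues=4
**   hw.ixl.dynamic_rx_itr=1
*/
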
#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};


/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note: this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

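	/*
	 * Illustrative usage (assuming standard sysctl(8) semantics):
	 * the handlers above appear in the per-device tree, e.g.
	 *   sysctl dev.ixl.0.fc=3          (request full flow control)
	 *   sysctl dev.ixl.0.current_speed (read-only port speed)
	 */
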
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* For now always do an initial CORE reset on first device */
	{
		static int	ixl_dev_count;
		static int	ixl_dev_track[32];
		u32		my_dev;
		int		i, found = FALSE;
		u16		bus = pci_get_bus(dev);

		mtx_lock(&ixl_reset_mtx);
		my_dev = (bus << 8) | hw->bus.device;

		for (i = 0; i < ixl_dev_count; i++) {
			if (ixl_dev_track[i] == my_dev)
				found = TRUE;
		}

		if (!found) {
			u32 reg;

			ixl_dev_track[ixl_dev_count] = my_dev;
			ixl_dev_count++;

			INIT_DEBUGOUT("Initial CORE RESET\n");
			wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
			ixl_flush(hw);
			i = 50;
			do {
				i40e_msec_delay(50);
				reg = rd32(hw, I40E_GLGEN_RSTAT);
				if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
					break;
			} while (i--);

			/* paranoia */
			wr32(hw, I40E_PF_ATQLEN, 0);
			wr32(hw, I40E_PF_ATQBAL, 0);
			wr32(hw, I40E_PF_ATQBAH, 0);
			i40e_clear_pxe_mode(hw);
		}
		mtx_unlock(&ixl_reset_mtx);
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	i40e_msec_delay(75);
	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
	if (error) {
		device_printf(dev, "link restart failed, aq_err=%d\n",
		    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0)
		goto err_late;

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	ixl_free_vsi(vsi);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
	ixl_free_pci_resources(pf);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANs are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	ether_ifdetach(vsi->ifp);
	callout_drain(&pf->timer);

	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

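/*
** For example (illustrative): because TSO4 depends on TXCSUM above,
** clearing the checksum offload from userland, e.g. with
** `ifconfig ixl0 -txcsum`, also clears TSO4; the IXL_FLAGS_KEEP_TSO4
** flag then restores TSO4 when TXCSUM is re-enabled.
*/
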
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: it is used by the stack as
 *  the init entry point in the network interface structure, and it
 *  is also used by the driver as a hw/sw initialization routine
 *  to get to a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Setup the VSI */
	ixl_setup_vsi(vsi);

	/*
	** Prepare the rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections.
*/
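/*
** For example (illustrative numbers): with the default
** hw.ixl.atr_rate of 20, every TCP SYN/FIN segment is sampled,
** while otherwise roughly one in twenty transmitted segments of a
** flow programs or refreshes its filter.
*/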
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter; otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, while there
		** are uncleaned descriptors, it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning: queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct i40e_hw		*hw = &pf->hw;
	struct ifnet		*ifp = vsi->ifp;
	device_t		dev = pf->dev;
	enum i40e_fc_mode 	fc;


	if (vsi->link_up) {
		if (vsi->link_active == FALSE) {
			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
			if (bootverbose) {
				fc = hw->fc.current_mode;
				device_printf(dev, "Link is up %d Gbps %s,"
				    " Flow Control: %s\n",
				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
				    "Full Duplex", ixl_fc_string[fc]);
			}
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}
1755 
1756 /*********************************************************************
1757  *
1758  *  This routine disables all traffic on the adapter by disabling
1759  *  interrupts and the VSI rings, then stops the local timer.
1760  *
1761  **********************************************************************/
1762 
1763 static void
1764 ixl_stop(struct ixl_pf *pf)
1765 {
1766 	struct ixl_vsi	*vsi = &pf->vsi;
1767 	struct ifnet	*ifp = vsi->ifp;
1768 
1769 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1770 
1771 	INIT_DEBUGOUT("ixl_stop: begin\n");
1772 	ixl_disable_intr(vsi);
1773 	ixl_disable_rings(vsi);
1774 
1775 	/* Tell the stack that the interface is no longer active */
1776 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1777 
1778 	/* Stop the local timer */
1779 	callout_stop(&pf->timer);
1780 
1781 	return;
1782 }
1783 
1784 
1785 /*********************************************************************
1786  *
1787  *  Setup Legacy or MSI Interrupt resources and handlers for the VSI
1788  *
1789  **********************************************************************/
1790 static int
1791 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1792 {
1793 	device_t        dev = pf->dev;
1794 	struct 		ixl_vsi *vsi = &pf->vsi;
1795 	struct		ixl_queue *que = vsi->queues;
1796 	int 		error, rid = 0;
1797 
1798 	if (pf->msix == 1)
1799 		rid = 1;
1800 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1801 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1802 	if (pf->res == NULL) {
1803 		device_printf(dev,"Unable to allocate"
1804 		    " bus resource: vsi legacy/msi interrupt\n");
1805 		return (ENXIO);
1806 	}
1807 
1808 	/* Set the handler function */
1809 	error = bus_setup_intr(dev, pf->res,
1810 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1811 	    ixl_intr, pf, &pf->tag);
1812 	if (error) {
1813 		pf->res = NULL;
1814 		device_printf(dev, "Failed to register legacy/msi handler");
1815 		return (error);
1816 	}
1817 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1818 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1819 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1820 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1821 	    taskqueue_thread_enqueue, &que->tq);
1822 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1823 	    device_get_nameunit(dev));
1824 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1825 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1826 	    taskqueue_thread_enqueue, &pf->tq);
1827 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1828 	    device_get_nameunit(dev));
1829 
1830 	return (0);
1831 }
1832 
1833 
1834 /*********************************************************************
1835  *
1836  *  Setup MSIX Interrupt resources and handlers for the VSI
1837  *
1838  **********************************************************************/
1839 static int
1840 ixl_assign_vsi_msix(struct ixl_pf *pf)
1841 {
1842 	device_t	dev = pf->dev;
1843 	struct 		ixl_vsi *vsi = &pf->vsi;
1844 	struct 		ixl_queue *que = vsi->queues;
1845 	struct		tx_ring	 *txr;
1846 	int 		error, rid, vector = 0;
1847 
1848 	/* Admin Queue is vector 0 */
1849 	rid = vector + 1;
1850 	pf->res = bus_alloc_resource_any(dev,
1851     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1852 	if (!pf->res) {
1853 		device_printf(dev,"Unable to allocate"
1854     	    " bus resource: Adminq interrupt [%d]\n", rid);
1855 		return (ENXIO);
1856 	}
1857 	/* Set the adminq vector and handler */
1858 	error = bus_setup_intr(dev, pf->res,
1859 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1860 	    ixl_msix_adminq, pf, &pf->tag);
1861 	if (error) {
1862 		pf->res = NULL;
1863 		device_printf(dev, "Failed to register Admin que handler");
1864 		return (error);
1865 	}
1866 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1867 	pf->admvec = vector;
1868 	/* Tasklet for Admin Queue */
1869 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1870 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1871 	    taskqueue_thread_enqueue, &pf->tq);
1872 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1873 	    device_get_nameunit(pf->dev));
1874 	++vector;
1875 
1876 	/* Now set up the stations */
1877 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1878 		rid = vector + 1;
1879 		txr = &que->txr;
1880 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1881 		    RF_SHAREABLE | RF_ACTIVE);
1882 		if (que->res == NULL) {
1883 			device_printf(dev,"Unable to allocate"
1884 		    	    " bus resource: que interrupt [%d]\n", vector);
1885 			return (ENXIO);
1886 		}
1887 		/* Set the handler function */
1888 		error = bus_setup_intr(dev, que->res,
1889 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1890 		    ixl_msix_que, que, &que->tag);
1891 		if (error) {
1892 			que->res = NULL;
1893 			device_printf(dev, "Failed to register que handler");
1894 			return (error);
1895 		}
1896 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1897 		/* Bind the vector to a CPU */
1898 		bus_bind_intr(dev, que->res, i);
1899 		que->msix = vector;
1900 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1901 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1902 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1903 		    taskqueue_thread_enqueue, &que->tq);
1904 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1905 		    device_get_nameunit(pf->dev));
1906 	}
1907 
1908 	return (0);
1909 }
1910 
1911 
1912 /*
1913  * Allocate MSI/X vectors
1914  */
1915 static int
1916 ixl_init_msix(struct ixl_pf *pf)
1917 {
1918 	device_t dev = pf->dev;
1919 	int rid, want, vectors, queues, available;
1920 
1921 	/* Override by tuneable */
1922 	if (ixl_enable_msix == 0)
1923 		goto msi;
1924 
1925 	/*
1926 	** When used in a virtualized environment
1927 	** PCI BUSMASTER capability may not be set
1928 	** so explicitly set it here and rewrite
1929 	** the ENABLE in the MSIX control register
1930 	** at this point to cause the host to
1931 	** successfully initialize us.
1932 	*/
1933 	{
1934 		u16 pci_cmd_word;
1935 		int msix_ctrl;
1936 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1937 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1938 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1939 		pci_find_cap(dev, PCIY_MSIX, &rid);
1940 		rid += PCIR_MSIX_CTRL;
1941 		msix_ctrl = pci_read_config(dev, rid, 2);
1942 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1943 		pci_write_config(dev, rid, msix_ctrl, 2);
1944 	}
1945 
1946 	/* First try MSI/X */
1947 	rid = PCIR_BAR(IXL_BAR);
1948 	pf->msix_mem = bus_alloc_resource_any(dev,
1949 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1950 	if (!pf->msix_mem) {
1951 		/* May not be enabled */
1952 		device_printf(pf->dev,
1953 		    "Unable to map MSIX table \n");
1954 		goto msi;
1955 	}
1956 
1957 	available = pci_msix_count(dev);
1958 	if (available == 0) { /* system has msix disabled */
1959 		bus_release_resource(dev, SYS_RES_MEMORY,
1960 		    rid, pf->msix_mem);
1961 		pf->msix_mem = NULL;
1962 		goto msi;
1963 	}
1964 
1965 	/* Figure out a reasonable auto config value */
1966 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1967 
1968 	/* Override with hardcoded value if sane */
1969 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1970 		queues = ixl_max_queues;
1971 
1972 	/*
1973 	** Want one vector (RX/TX pair) per queue
1974 	** plus an additional for the admin queue.
1975 	*/
1976 	want = queues + 1;
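	/*
	** e.g. with 8 CPUs and 16 vectors available this yields
	** queues = 8 and want = 9: one vector per queue pair
	** plus one for the admin queue.
	*/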
1977 	if (want <= available)	/* Have enough */
1978 		vectors = want;
1979 	else {
1980 		device_printf(pf->dev,
1981 		    "MSIX Configuration Problem, "
1982 		    "%d vectors available but %d wanted!\n",
1983 		    available, want);
1984 		return (0); /* Will go to Legacy setup */
1985 	}
1986 
1987 	if (pci_alloc_msix(dev, &vectors) == 0) {
1988 		device_printf(pf->dev,
1989 		    "Using MSIX interrupts with %d vectors\n", vectors);
1990 		pf->msix = vectors;
1991 		pf->vsi.num_queues = queues;
1992 		return (vectors);
1993 	}
1994 msi:
1995 	vectors = pci_msi_count(dev);
1996 	pf->vsi.num_queues = 1;
1997 	pf->msix = 1;
1998 	ixl_max_queues = 1;
1999 	ixl_enable_msix = 0;
2000 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2001 		device_printf(pf->dev,"Using an MSI interrupt\n");
2002 	else {
2003 		pf->msix = 0;
2004 		device_printf(pf->dev,"Using a Legacy interrupt\n");
2005 	}
2006 	return (vectors);
2007 }
2008 
2009 
2010 /*
2011  * Plumb MSI/X vectors
2012  */
2013 static void
2014 ixl_configure_msix(struct ixl_pf *pf)
2015 {
2016 	struct i40e_hw	*hw = &pf->hw;
2017 	struct ixl_vsi *vsi = &pf->vsi;
2018 	u32		reg;
2019 	u16		vector = 1;
2020 
2021 	/* First set up the adminq - vector 0 */
2022 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2023 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2024 
2025 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2026 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2027 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2028 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2029 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2030 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2031 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2032 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2033 
2034 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2035 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2036 
2037 	wr32(hw, I40E_PFINT_DYN_CTL0,
2038 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2039 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2040 
2041 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2042 
2043 	/* Next configure the queues */
2044 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2045 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2046 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2047 
2048 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2049 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2050 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2051 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2052 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2053 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2054 
2055 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2056 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2057 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2058 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2059 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2060 		if (i == (vsi->num_queues - 1))
2061 			reg |= (IXL_QUEUE_EOL
2062 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2063 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2064 	}
2065 }
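/*
** The loop above chains each vector's interrupt causes into a
** list the hardware walks: LNKLSTN(i) starts at RX queue i,
** RQCTL's NEXTQ points at TX queue i, and TQCTL's NEXTQ points
** at RX queue i + 1; the final TX entry terminates the list
** with IXL_QUEUE_EOL.
*/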
2066 
2067 /*
2068  * Configure for MSI single vector operation
2069  */
2070 static void
2071 ixl_configure_legacy(struct ixl_pf *pf)
2072 {
2073 	struct i40e_hw	*hw = &pf->hw;
2074 	u32		reg;
2075 
2076 
2077 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2078 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2079 
2080 
2081 	/* Setup "other" causes */
2082 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2083 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2084 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2085 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2086 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2087 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2088 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2089 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2090 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2091 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2092 	    ;
2093 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2094 
2095 	/* SW_ITR_IDX = 0, but don't change INTENA */
2096 	wr32(hw, I40E_PFINT_DYN_CTL0,
2097 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2098 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2099 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2100 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2101 
2102 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2103 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2104 
2105 	/* Associate the queue pair to the vector and enable the q int */
2106 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2107 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2108 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2109 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2110 
2111 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2112 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2113 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2114 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2115 
2116 	/* Next enable the queue pair */
2117 	reg = rd32(hw, I40E_QTX_ENA(0));
2118 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2119 	wr32(hw, I40E_QTX_ENA(0), reg);
2120 
2121 	reg = rd32(hw, I40E_QRX_ENA(0));
2122 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2123 	wr32(hw, I40E_QRX_ENA(0), reg);
2124 }
2125 
2126 
2127 /*
2128  * Set the Initial ITR state
2129  */
2130 static void
2131 ixl_configure_itr(struct ixl_pf *pf)
2132 {
2133 	struct i40e_hw		*hw = &pf->hw;
2134 	struct ixl_vsi		*vsi = &pf->vsi;
2135 	struct ixl_queue	*que = vsi->queues;
2136 
2137 	vsi->rx_itr_setting = ixl_rx_itr;
2138 	if (ixl_dynamic_rx_itr)
2139 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2140 	vsi->tx_itr_setting = ixl_tx_itr;
2141 	if (ixl_dynamic_tx_itr)
2142 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2143 
2144 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2145 		struct tx_ring	*txr = &que->txr;
2146 		struct rx_ring 	*rxr = &que->rxr;
2147 
2148 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2149 		    vsi->rx_itr_setting);
2150 		rxr->itr = vsi->rx_itr_setting;
2151 		rxr->latency = IXL_AVE_LATENCY;
2152 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2153 		    vsi->tx_itr_setting);
2154 		txr->itr = vsi->tx_itr_setting;
2155 		txr->latency = IXL_AVE_LATENCY;
2156 	}
2157 }
2158 
2159 
2160 static int
2161 ixl_allocate_pci_resources(struct ixl_pf *pf)
2162 {
2163 	int             rid;
2164 	device_t        dev = pf->dev;
2165 
2166 	rid = PCIR_BAR(0);
2167 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2168 	    &rid, RF_ACTIVE);
2169 
2170 	if (!(pf->pci_mem)) {
2171 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2172 		return (ENXIO);
2173 	}
2174 
2175 	pf->osdep.mem_bus_space_tag =
2176 		rman_get_bustag(pf->pci_mem);
2177 	pf->osdep.mem_bus_space_handle =
2178 		rman_get_bushandle(pf->pci_mem);
2179 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2180 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2181 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2182 
2183 	pf->hw.back = &pf->osdep;
2184 
2185 	/*
2186 	** Now set up MSI or MSI/X; this should
2187 	** return the number of supported
2188 	** vectors. (Will be 1 for MSI)
2189 	*/
2190 	pf->msix = ixl_init_msix(pf);
2191 	return (0);
2192 }
2193 
2194 static void
2195 ixl_free_pci_resources(struct ixl_pf * pf)
2196 {
2197 	struct ixl_vsi		*vsi = &pf->vsi;
2198 	struct ixl_queue	*que = vsi->queues;
2199 	device_t		dev = pf->dev;
2200 	int			rid, memrid;
2201 
2202 	memrid = PCIR_BAR(IXL_BAR);
2203 
2204 	/* We may get here before stations are setup */
2205 	if ((!ixl_enable_msix) || (que == NULL))
2206 		goto early;
2207 
2208 	/*
2209 	**  Release all msix VSI resources:
2210 	*/
2211 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2212 		rid = que->msix + 1;
2213 		if (que->tag != NULL) {
2214 			bus_teardown_intr(dev, que->res, que->tag);
2215 			que->tag = NULL;
2216 		}
2217 		if (que->res != NULL)
2218 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2219 	}
2220 
2221 early:
2222 	/* Clean the AdminQ interrupt last */
2223 	if (pf->admvec) /* we are doing MSIX */
2224 		rid = pf->admvec + 1;
2225 	else
2226 		rid = (pf->msix != 0) ? 1 : 0;
2227 
2228 	if (pf->tag != NULL) {
2229 		bus_teardown_intr(dev, pf->res, pf->tag);
2230 		pf->tag = NULL;
2231 	}
2232 	if (pf->res != NULL)
2233 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2234 
2235 	if (pf->msix)
2236 		pci_release_msi(dev);
2237 
2238 	if (pf->msix_mem != NULL)
2239 		bus_release_resource(dev, SYS_RES_MEMORY,
2240 		    memrid, pf->msix_mem);
2241 
2242 	if (pf->pci_mem != NULL)
2243 		bus_release_resource(dev, SYS_RES_MEMORY,
2244 		    PCIR_BAR(0), pf->pci_mem);
2245 
2246 	return;
2247 }
2248 
2249 
2250 /*********************************************************************
2251  *
2252  *  Setup networking device structure and register an interface.
2253  *
2254  **********************************************************************/
2255 static int
2256 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2257 {
2258 	struct ifnet		*ifp;
2259 	struct i40e_hw		*hw = vsi->hw;
2260 	struct ixl_queue	*que = vsi->queues;
2261 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2262 	enum i40e_status_code aq_error = 0;
2263 
2264 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2265 
2266 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2267 	if (ifp == NULL) {
2268 		device_printf(dev, "can not allocate ifnet structure\n");
2269 		return (-1);
2270 	}
2271 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2272 	ifp->if_mtu = ETHERMTU;
2273 	ifp->if_baudrate = 4000000000;  // ??
2274 	ifp->if_init = ixl_init;
2275 	ifp->if_softc = vsi;
2276 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2277 	ifp->if_ioctl = ixl_ioctl;
2278 
2279 #if __FreeBSD_version >= 1100000
2280 	if_setgetcounterfn(ifp, ixl_get_counter);
2281 #endif
2282 
2283 	ifp->if_transmit = ixl_mq_start;
2284 
2285 	ifp->if_qflush = ixl_qflush;
2286 
2287 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2288 
2289 	ether_ifattach(ifp, hw->mac.addr);
2290 
2291 	vsi->max_frame_size =
2292 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2293 	    + ETHER_VLAN_ENCAP_LEN;
2294 
2295 	/*
2296 	 * Tell the upper layer(s) we support long frames.
2297 	 */
2298 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2299 
2300 	ifp->if_capabilities |= IFCAP_HWCSUM;
2301 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2302 	ifp->if_capabilities |= IFCAP_TSO;
2303 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2304 	ifp->if_capabilities |= IFCAP_LRO;
2305 
2306 	/* VLAN capabilities */
2307 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2308 			     |  IFCAP_VLAN_HWTSO
2309 			     |  IFCAP_VLAN_MTU
2310 			     |  IFCAP_VLAN_HWCSUM;
2311 	ifp->if_capenable = ifp->if_capabilities;
2312 
2313 	/*
2314 	** Don't turn this on by default: if vlans are
2315 	** created on another pseudo device (e.g. lagg),
2316 	** then vlan events are not passed thru, breaking
2317 	** operation; with HW FILTER off it works. If
2318 	** using vlans directly on the ixl driver you can
2319 	** enable this and get full hardware tag filtering.
2320 	*/
2321 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2322 
2323 	/*
2324 	 * Specify the media types supported by this adapter and register
2325 	 * callbacks to update media and link information
2326 	 */
2327 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2328 		     ixl_media_status);
2329 
2330 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2331 	if (aq_error) {
2332 		printf("Error getting supported media types, AQ error %d\n", aq_error);
2333 		return (EPERM);
2334 	}
2335 
2336 	/* Display supported media types */
2337 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2338 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2339 
2340 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2341 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2342 
2343 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2344 	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2345 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2346 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2347 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2348 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2349 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2350 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2351 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2352 
2353 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2354 	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2355 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2356 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2357 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2358 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2359 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2360 
2361 	/* Use autoselect media by default */
2362 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2363 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2364 
2365 	return (0);
2366 }
2367 
2368 static bool
2369 ixl_config_link(struct i40e_hw *hw)
2370 {
2371 	bool check;
2372 
2373 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2374 	check = i40e_get_link_status(hw);
2375 #ifdef IXL_DEBUG
2376 	printf("Link is %s\n", check ? "up":"down");
2377 #endif
2378 	return (check);
2379 }
2380 
2381 /*********************************************************************
2382  *
2383  *  Initialize this VSI
2384  *
2385  **********************************************************************/
2386 static int
2387 ixl_setup_vsi(struct ixl_vsi *vsi)
2388 {
2389 	struct i40e_hw	*hw = vsi->hw;
2390 	device_t 	dev = vsi->dev;
2391 	struct i40e_aqc_get_switch_config_resp *sw_config;
2392 	struct i40e_vsi_context	ctxt;
2393 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2394 	int	ret = I40E_SUCCESS;
2395 	u16	next = 0;
2396 
2397 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2398 	ret = i40e_aq_get_switch_config(hw, sw_config,
2399 	    sizeof(aq_buf), &next, NULL);
2400 	if (ret) {
2401 		device_printf(dev,"aq_get_switch_config failed!!\n");
2402 		return (ret);
2403 	}
2404 #ifdef IXL_DEBUG
2405 	printf("Switch config: header reported: %d in structure, %d total\n",
2406     	    sw_config->header.num_reported, sw_config->header.num_total);
2407 	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2408 	    sw_config->element[0].element_type,
2409 	    sw_config->element[0].seid,
2410 	    sw_config->element[0].uplink_seid,
2411 	    sw_config->element[0].downlink_seid);
2412 #endif
2413 	/* Save off this important value */
2414 	vsi->seid = sw_config->element[0].seid;
2415 
2416 	memset(&ctxt, 0, sizeof(ctxt));
2417 	ctxt.seid = vsi->seid;
2418 	ctxt.pf_num = hw->pf_id;
2419 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2420 	if (ret) {
2421 		device_printf(dev,"get vsi params failed %x!!\n", ret);
2422 		return (ret);
2423 	}
2424 #ifdef IXL_DEBUG
2425 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2426 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2427 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2428 	    ctxt.uplink_seid, ctxt.vsi_number,
2429 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2430 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2431 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2432 #endif
2433 	/*
2434 	** Set the queue and traffic class bits
2435 	**  - when multiple traffic classes are supported
2436 	**    this will need to be more robust.
2437 	*/
2438 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2439 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2440 	ctxt.info.queue_mapping[0] = 0;
2441 	ctxt.info.tc_mapping[0] = 0x0800;
2442 
2443 	/* Set VLAN receive stripping mode */
2444 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2445 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2446 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2447 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2448 	else
2449 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2450 
2451 	/* Keep copy of VSI info in VSI for statistic counters */
2452 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2453 
2454 	/* Reset VSI statistics */
2455 	ixl_vsi_reset_stats(vsi);
2456 	vsi->hw_filters_add = 0;
2457 	vsi->hw_filters_del = 0;
2458 
2459 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2460 	if (ret)
2461 		device_printf(dev,"update vsi params failed %x!!\n",
2462 		   hw->aq.asq_last_status);
2463 	return (ret);
2464 }
2465 
2466 
2467 /*********************************************************************
2468  *
2469  *  Initialize the VSI:  this handles contexts, which means things
2470  *  			 like the number of descriptors, buffer size,
2471  *			 plus we init the rings thru this function.
2472  *
2473  **********************************************************************/
2474 static int
2475 ixl_initialize_vsi(struct ixl_vsi *vsi)
2476 {
2477 	struct ixl_queue	*que = vsi->queues;
2478 	device_t		dev = vsi->dev;
2479 	struct i40e_hw		*hw = vsi->hw;
2480 	int			err = 0;
2481 
2482 
2483 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2484 		struct tx_ring		*txr = &que->txr;
2485 		struct rx_ring 		*rxr = &que->rxr;
2486 		struct i40e_hmc_obj_txq tctx;
2487 		struct i40e_hmc_obj_rxq rctx;
2488 		u32			txctl;
2489 		u16			size;
2490 
2491 
2492 		/* Setup the HMC TX Context  */
2493 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2494 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2495 		tctx.new_context = 1;
2496 		tctx.base = (txr->dma.pa/128);
2497 		tctx.qlen = que->num_desc;
2498 		tctx.fc_ena = 0;
2499 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2500 		/* Enable HEAD writeback */
2501 		tctx.head_wb_ena = 1;
2502 		tctx.head_wb_addr = txr->dma.pa +
2503 		    (que->num_desc * sizeof(struct i40e_tx_desc));
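		/*
		** The write-back slot is the u32 placed just past the
		** last descriptor; ixl_setup_stations reserves it by
		** adding sizeof(u32) to the ring allocation size.
		*/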
2504 		tctx.rdylist_act = 0;
2505 		err = i40e_clear_lan_tx_queue_context(hw, i);
2506 		if (err) {
2507 			device_printf(dev, "Unable to clear TX context\n");
2508 			break;
2509 		}
2510 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2511 		if (err) {
2512 			device_printf(dev, "Unable to set TX context\n");
2513 			break;
2514 		}
2515 		/* Associate the ring with this PF */
2516 		txctl = I40E_QTX_CTL_PF_QUEUE;
2517 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2518 		    I40E_QTX_CTL_PF_INDX_MASK);
2519 		wr32(hw, I40E_QTX_CTL(i), txctl);
2520 		ixl_flush(hw);
2521 
2522 		/* Do ring (re)init */
2523 		ixl_init_tx_ring(que);
2524 
2525 		/* Next setup the HMC RX Context  */
2526 		if (vsi->max_frame_size <= 2048)
2527 			rxr->mbuf_sz = MCLBYTES;
2528 		else
2529 			rxr->mbuf_sz = MJUMPAGESIZE;
2530 
2531 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2532 
2533 		/* Set up an RX context for the HMC */
2534 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2535 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2536 		/* ignore header split for now */
2537 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2538 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2539 		    vsi->max_frame_size : max_rxmax;
2540 		rctx.dtype = 0;
2541 		rctx.dsize = 1;	/* do 32byte descriptors */
2542 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2543 		rctx.base = (rxr->dma.pa/128);
2544 		rctx.qlen = que->num_desc;
2545 		rctx.tphrdesc_ena = 1;
2546 		rctx.tphwdesc_ena = 1;
2547 		rctx.tphdata_ena = 0;
2548 		rctx.tphhead_ena = 0;
2549 		rctx.lrxqthresh = 2;
2550 		rctx.crcstrip = 1;
2551 		rctx.l2tsel = 1;
2552 		rctx.showiv = 1;
2553 		rctx.fc_ena = 0;
2554 		rctx.prefena = 1;
2555 
2556 		err = i40e_clear_lan_rx_queue_context(hw, i);
2557 		if (err) {
2558 			device_printf(dev,
2559 			    "Unable to clear RX context %d\n", i);
2560 			break;
2561 		}
2562 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2563 		if (err) {
2564 			device_printf(dev, "Unable to set RX context %d\n", i);
2565 			break;
2566 		}
2567 		err = ixl_init_rx_ring(que);
2568 		if (err) {
2569 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2570 			break;
2571 		}
2572 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2573 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
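		/*
		** Reset the tail to 0, then bump it to the last
		** descriptor to publish the freshly filled RX ring
		** to the hardware.
		*/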
2574 	}
2575 	return (err);
2576 }
2577 
2578 
2579 /*********************************************************************
2580  *
2581  *  Free all VSI structs.
2582  *
2583  **********************************************************************/
2584 void
2585 ixl_free_vsi(struct ixl_vsi *vsi)
2586 {
2587 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2588 	struct ixl_queue	*que = vsi->queues;
2589 	struct ixl_mac_filter *f;
2590 
2591 	/* Free station queues */
2592 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2593 		struct tx_ring *txr = &que->txr;
2594 		struct rx_ring *rxr = &que->rxr;
2595 
2596 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2597 			continue;
2598 		IXL_TX_LOCK(txr);
2599 		ixl_free_que_tx(que);
2600 		if (txr->base)
2601 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2602 		IXL_TX_UNLOCK(txr);
2603 		IXL_TX_LOCK_DESTROY(txr);
2604 
2605 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2606 			continue;
2607 		IXL_RX_LOCK(rxr);
2608 		ixl_free_que_rx(que);
2609 		if (rxr->base)
2610 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2611 		IXL_RX_UNLOCK(rxr);
2612 		IXL_RX_LOCK_DESTROY(rxr);
2613 
2614 	}
2615 	free(vsi->queues, M_DEVBUF);
2616 
2617 	/* Free VSI filter list */
2618 	while (!SLIST_EMPTY(&vsi->ftl)) {
2619 		f = SLIST_FIRST(&vsi->ftl);
2620 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2621 		free(f, M_DEVBUF);
2622 	}
2623 }
2624 
2625 
2626 /*********************************************************************
2627  *
2628  *  Allocate memory for the VSI (virtual station interface) and their
2629  *  associated queues, rings and the descriptors associated with each,
2630  *  called only once at attach.
2631  *
2632  **********************************************************************/
2633 static int
2634 ixl_setup_stations(struct ixl_pf *pf)
2635 {
2636 	device_t		dev = pf->dev;
2637 	struct ixl_vsi		*vsi;
2638 	struct ixl_queue	*que;
2639 	struct tx_ring		*txr;
2640 	struct rx_ring		*rxr;
2641 	int 			rsize, tsize;
2642 	int			error = I40E_SUCCESS;
2643 
2644 	vsi = &pf->vsi;
2645 	vsi->back = (void *)pf;
2646 	vsi->hw = &pf->hw;
2647 	vsi->id = 0;
2648 	vsi->num_vlans = 0;
2649 
2650 	/* Get memory for the station queues */
2651 	if (!(vsi->queues =
2652 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2653 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2654 		device_printf(dev, "Unable to allocate queue memory\n");
2655 		error = ENOMEM;
2656 		goto early;
2657 	}
2658 
2659 	for (int i = 0; i < vsi->num_queues; i++) {
2660 		que = &vsi->queues[i];
2661 		que->num_desc = ixl_ringsz;
2662 		que->me = i;
2663 		que->vsi = vsi;
2664 		/* mark the queue as active */
2665 		vsi->active_queues |= (u64)1 << que->me;
2666 		txr = &que->txr;
2667 		txr->que = que;
2668 		txr->tail = I40E_QTX_TAIL(que->me);
2669 
2670 		/* Initialize the TX lock */
2671 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2672 		    device_get_nameunit(dev), que->me);
2673 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2674 		/* Create the TX descriptor ring */
2675 		tsize = roundup2((que->num_desc *
2676 		    sizeof(struct i40e_tx_desc)) +
2677 		    sizeof(u32), DBA_ALIGN);
2678 		if (i40e_allocate_dma_mem(&pf->hw,
2679 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2680 			device_printf(dev,
2681 			    "Unable to allocate TX Descriptor memory\n");
2682 			error = ENOMEM;
2683 			goto fail;
2684 		}
2685 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2686 		bzero((void *)txr->base, tsize);
2687 		/* Now allocate transmit soft structs for the ring */
2688 		if (ixl_allocate_tx_data(que)) {
2689 			device_printf(dev,
2690 			    "Critical Failure setting up TX structures\n");
2691 			error = ENOMEM;
2692 			goto fail;
2693 		}
2694 		/* Allocate a buf ring */
2695 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2696 		    M_WAITOK, &txr->mtx);
2697 		if (txr->br == NULL) {
2698 			device_printf(dev,
2699 			    "Critical Failure setting up TX buf ring\n");
2700 			error = ENOMEM;
2701 			goto fail;
2702 		}
2703 
2704 		/*
2705 		 * Next the RX queues...
2706 		 */
2707 		rsize = roundup2(que->num_desc *
2708 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2709 		rxr = &que->rxr;
2710 		rxr->que = que;
2711 		rxr->tail = I40E_QRX_TAIL(que->me);
2712 
2713 		/* Initialize the RX side lock */
2714 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2715 		    device_get_nameunit(dev), que->me);
2716 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2717 
2718 		if (i40e_allocate_dma_mem(&pf->hw,
2719 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2720 			device_printf(dev,
2721 			    "Unable to allocate RX Descriptor memory\n");
2722 			error = ENOMEM;
2723 			goto fail;
2724 		}
2725 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2726 		bzero((void *)rxr->base, rsize);
2727 
2728         	/* Allocate receive soft structs for the ring*/
2729 		if (ixl_allocate_rx_data(que)) {
2730 			device_printf(dev,
2731 			    "Critical Failure setting up receive structs\n");
2732 			error = ENOMEM;
2733 			goto fail;
2734 		}
2735 	}
2736 
2737 	return (0);
2738 
2739 fail:
2740 	for (int i = 0; i < vsi->num_queues; i++) {
2741 		que = &vsi->queues[i];
2742 		rxr = &que->rxr;
2743 		txr = &que->txr;
2744 		if (rxr->base)
2745 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2746 		if (txr->base)
2747 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2748 	}
2749 
2750 early:
2751 	return (error);
2752 }
2753 
2754 /*
2755 ** Provide an update to the queue RX
2756 ** interrupt moderation value.
2757 */
2758 static void
2759 ixl_set_queue_rx_itr(struct ixl_queue *que)
2760 {
2761 	struct ixl_vsi	*vsi = que->vsi;
2762 	struct i40e_hw	*hw = vsi->hw;
2763 	struct rx_ring	*rxr = &que->rxr;
2764 	u16		rx_itr;
2765 	u16		rx_latency = 0;
2766 	int		rx_bytes;
2767 
2768 
2769 	/* Idle, do nothing */
2770 	if (rxr->bytes == 0)
2771 		return;
2772 
2773 	if (ixl_dynamic_rx_itr) {
2774 		rx_bytes = rxr->bytes/rxr->itr;
2775 		rx_itr = rxr->itr;
2776 
2777 		/* Adjust latency range */
2778 		switch (rxr->latency) {
2779 		case IXL_LOW_LATENCY:
2780 			if (rx_bytes > 10) {
2781 				rx_latency = IXL_AVE_LATENCY;
2782 				rx_itr = IXL_ITR_20K;
2783 			}
2784 			break;
2785 		case IXL_AVE_LATENCY:
2786 			if (rx_bytes > 20) {
2787 				rx_latency = IXL_BULK_LATENCY;
2788 				rx_itr = IXL_ITR_8K;
2789 			} else if (rx_bytes <= 10) {
2790 				rx_latency = IXL_LOW_LATENCY;
2791 				rx_itr = IXL_ITR_100K;
2792 			}
2793 			break;
2794 		case IXL_BULK_LATENCY:
2795 			if (rx_bytes <= 20) {
2796 				rx_latency = IXL_AVE_LATENCY;
2797 				rx_itr = IXL_ITR_20K;
2798 			}
2799 			break;
2800 		}
2801 
2802 		rxr->latency = rx_latency;
2803 
2804 		if (rx_itr != rxr->itr) {
2805 			/* do an exponential smoothing */
2806 			rx_itr = (10 * rx_itr * rxr->itr) /
2807 			    ((9 * rx_itr) + rxr->itr);
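			/*
			** e.g. moving from itr 100 toward a target of 20:
			** (10 * 20 * 100) / ((9 * 20) + 100) = 71, so each
			** pass steps partway toward the target instead of
			** jumping straight to it.
			*/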
2808 			rxr->itr = rx_itr & IXL_MAX_ITR;
2809 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2810 			    que->me), rxr->itr);
2811 		}
2812 	} else { /* We may have toggled to non-dynamic */
2813 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2814 			vsi->rx_itr_setting = ixl_rx_itr;
2815 		/* Update the hardware if needed */
2816 		if (rxr->itr != vsi->rx_itr_setting) {
2817 			rxr->itr = vsi->rx_itr_setting;
2818 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2819 			    que->me), rxr->itr);
2820 		}
2821 	}
2822 	rxr->bytes = 0;
2823 	rxr->packets = 0;
2824 	return;
2825 }
2826 
2827 
2828 /*
2829 ** Provide an update to the queue TX
2830 ** interrupt moderation value.
2831 */
2832 static void
2833 ixl_set_queue_tx_itr(struct ixl_queue *que)
2834 {
2835 	struct ixl_vsi	*vsi = que->vsi;
2836 	struct i40e_hw	*hw = vsi->hw;
2837 	struct tx_ring	*txr = &que->txr;
2838 	u16		tx_itr;
2839 	u16		tx_latency = 0;
2840 	int		tx_bytes;
2841 
2842 
2843 	/* Idle, do nothing */
2844 	if (txr->bytes == 0)
2845 		return;
2846 
2847 	if (ixl_dynamic_tx_itr) {
2848 		tx_bytes = txr->bytes/txr->itr;
2849 		tx_itr = txr->itr;
2850 
2851 		switch (txr->latency) {
2852 		case IXL_LOW_LATENCY:
2853 			if (tx_bytes > 10) {
2854 				tx_latency = IXL_AVE_LATENCY;
2855 				tx_itr = IXL_ITR_20K;
2856 			}
2857 			break;
2858 		case IXL_AVE_LATENCY:
2859 			if (tx_bytes > 20) {
2860 				tx_latency = IXL_BULK_LATENCY;
2861 				tx_itr = IXL_ITR_8K;
2862 			} else if (tx_bytes <= 10) {
2863 				tx_latency = IXL_LOW_LATENCY;
2864 				tx_itr = IXL_ITR_100K;
2865 			}
2866 			break;
2867 		case IXL_BULK_LATENCY:
2868 			if (tx_bytes <= 20) {
2869 				tx_latency = IXL_AVE_LATENCY;
2870 				tx_itr = IXL_ITR_20K;
2871 			}
2872 			break;
2873 		}
2874 
2875 		txr->latency = tx_latency;
2876 
2877 		if (tx_itr != txr->itr) {
2878 			/* do an exponential smoothing */
2879 			tx_itr = (10 * tx_itr * txr->itr) /
2880 			    ((9 * tx_itr) + txr->itr);
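			/*
			** Same smoothing as the RX path; see the worked
			** example in ixl_set_queue_rx_itr above.
			*/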
2881 			txr->itr = tx_itr & IXL_MAX_ITR;
2882 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2883 			    que->me), txr->itr);
2884 		}
2885 
2886 	} else { /* We may have toggled to non-dynamic */
2887 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2888 			vsi->tx_itr_setting = ixl_tx_itr;
2889 		/* Update the hardware if needed */
2890 		if (txr->itr != vsi->tx_itr_setting) {
2891 			txr->itr = vsi->tx_itr_setting;
2892 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2893 			    que->me), txr->itr);
2894 		}
2895 	}
2896 	txr->bytes = 0;
2897 	txr->packets = 0;
2898 	return;
2899 }
2900 
2901 
2902 static void
2903 ixl_add_hw_stats(struct ixl_pf *pf)
2904 {
2905 	device_t dev = pf->dev;
2906 	struct ixl_vsi *vsi = &pf->vsi;
2907 	struct ixl_queue *queues = vsi->queues;
2908 	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2909 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2910 
2911 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2912 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2913 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2914 
2915 	struct sysctl_oid *vsi_node, *queue_node;
2916 	struct sysctl_oid_list *vsi_list, *queue_list;
2917 
2918 	struct tx_ring *txr;
2919 	struct rx_ring *rxr;
2920 
2921 	/* Driver statistics */
2922 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2923 			CTLFLAG_RD, &pf->watchdog_events,
2924 			"Watchdog timeouts");
2925 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2926 			CTLFLAG_RD, &pf->admin_irq,
2927 			"Admin Queue IRQ Handled");
2928 
2929 	/* VSI statistics */
2930 #define QUEUE_NAME_LEN 32
2931 	char queue_namebuf[QUEUE_NAME_LEN];
2932 
2933 	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2934 	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2935 	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2936 				   CTLFLAG_RD, NULL, "VSI-specific stats");
2937 	vsi_list = SYSCTL_CHILDREN(vsi_node);
2938 
2939 	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2940 
2941 	/* Queue statistics */
2942 	for (int q = 0; q < vsi->num_queues; q++) {
2943 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2944 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2945 					     CTLFLAG_RD, NULL, "Queue #");
2946 		queue_list = SYSCTL_CHILDREN(queue_node);
2947 
2948 		txr = &(queues[q].txr);
2949 		rxr = &(queues[q].rxr);
2950 
2951 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2952 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2953 				"m_defrag() failed");
2954 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2955 				CTLFLAG_RD, &(queues[q].dropped_pkts),
2956 				"Driver dropped packets");
2957 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2958 				CTLFLAG_RD, &(queues[q].irqs),
2959 				"irqs on this queue");
2960 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2961 				CTLFLAG_RD, &(queues[q].tso),
2962 				"TSO");
2963 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2964 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2965 				"Driver tx dma failure in xmit");
2966 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2967 				CTLFLAG_RD, &(txr->no_desc),
2968 				"Queue No Descriptor Available");
2969 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2970 				CTLFLAG_RD, &(txr->total_packets),
2971 				"Queue Packets Transmitted");
2972 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2973 				CTLFLAG_RD, &(txr->tx_bytes),
2974 				"Queue Bytes Transmitted");
2975 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2976 				CTLFLAG_RD, &(rxr->rx_packets),
2977 				"Queue Packets Received");
2978 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2979 				CTLFLAG_RD, &(rxr->rx_bytes),
2980 				"Queue Bytes Received");
2981 	}
2982 
2983 	/* MAC stats */
2984 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2985 }
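/*
** The nodes registered above hang off the device's sysctl tree,
** so they should appear as dev.ixl.<unit>.vsi.que<q>.* and
** dev.ixl.<unit>.mac.* in sysctl(8) output.
*/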
2986 
2987 static void
2988 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2989 	struct sysctl_oid_list *child,
2990 	struct i40e_eth_stats *eth_stats)
2991 {
2992 	struct ixl_sysctl_info ctls[] =
2993 	{
2994 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2995 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2996 			"Unicast Packets Received"},
2997 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2998 			"Multicast Packets Received"},
2999 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3000 			"Broadcast Packets Received"},
3001 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3002 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3003 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3004 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3005 			"Multicast Packets Transmitted"},
3006 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3007 			"Broadcast Packets Transmitted"},
3008 		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3009 		// end
3010 		{0,0,0}
3011 	};
3012 
3013 	struct ixl_sysctl_info *entry = ctls;
3014 	while (entry->stat != 0)
3015 	{
3016 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3017 				CTLFLAG_RD, entry->stat,
3018 				entry->description);
3019 		entry++;
3020 	}
3021 }
3022 
3023 static void
3024 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3025 	struct sysctl_oid_list *child,
3026 	struct i40e_hw_port_stats *stats)
3027 {
3028 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3029 				    CTLFLAG_RD, NULL, "Mac Statistics");
3030 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3031 
3032 	struct i40e_eth_stats *eth_stats = &stats->eth;
3033 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3034 
3035 	struct ixl_sysctl_info ctls[] =
3036 	{
3037 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3038 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3039 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3040 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3041 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3042 		/* Packet Reception Stats */
3043 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3044 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3045 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3046 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3047 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3048 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3049 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3050 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3051 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3052 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3053 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3054 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3055 		/* Packet Transmission Stats */
3056 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3057 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3058 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3059 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3060 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3061 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3062 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3063 		/* Flow control */
3064 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3065 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3066 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3067 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3068 		/* End */
3069 		{0,0,0}
3070 	};
3071 
3072 	struct ixl_sysctl_info *entry = ctls;
3073 	while (entry->stat != 0)
3074 	{
3075 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3076 				CTLFLAG_RD, entry->stat,
3077 				entry->description);
3078 		entry++;
3079 	}
3080 }
3081 
3082 /*
3083 ** ixl_config_rss - setup RSS
3084 **  - note this is done for the single vsi
3085 */
3086 static void ixl_config_rss(struct ixl_vsi *vsi)
3087 {
3088 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3089 	struct i40e_hw	*hw = vsi->hw;
3090 	u32		lut = 0;
3091 	u64		set_hena, hena;
3092 	int		i, j;
3093 
3094 	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3095 	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3096 	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3097 	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3098 
3099 	/* Fill out hash function seed */
3100 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3101                 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3102 
3103 	/* Enable PCTYPES for RSS: */
3104 	set_hena =
3105 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3106 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3107 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3108 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3109 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3110 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3111 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3112 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3113 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3114 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3115 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3116 
3117 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3118 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3119 	hena |= set_hena;
3120 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3121 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3122 
3123 	/* Populate the LUT with the max number of queues in round-robin fashion */
3124 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3125 		if (j == vsi->num_queues)
3126 			j = 0;
3127 		/* lut = 4-byte sliding window of 4 lut entries */
3128 		lut = (lut << 8) | (j &
3129 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3130 		/* On i = 3, we have 4 entries in lut; write to the register */
3131 		if ((i & 3) == 3)
3132 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3133 	}
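	/*
	** e.g. with 4 queues the first register written above is
	** HLUT(0) = 0x00010203: queue 0 lands in the most
	** significant byte and the pattern repeats across the
	** rest of the table.
	*/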
3134 	ixl_flush(hw);
3135 }
3136 
3137 
3138 /*
3139 ** This routine is run via a vlan config EVENT;
3140 ** it enables us to use the HW Filter table since
3141 ** we can get the vlan id. This just creates the
3142 ** entry in the soft version of the VFTA, init will
3143 ** repopulate the real table.
3144 */
3145 static void
3146 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3147 {
3148 	struct ixl_vsi	*vsi = ifp->if_softc;
3149 	struct i40e_hw	*hw = vsi->hw;
3150 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3151 
3152 	if (ifp->if_softc !=  arg)   /* Not our event */
3153 		return;
3154 
3155 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3156 		return;
3157 
3158 	IXL_PF_LOCK(pf);
3159 	++vsi->num_vlans;
3160 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3161 	IXL_PF_UNLOCK(pf);
3162 }
3163 
3164 /*
3165 ** This routine is run via a vlan
3166 ** unconfig EVENT; it removes our
3167 ** entry from the soft vfta.
3168 */
3169 static void
3170 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3171 {
3172 	struct ixl_vsi	*vsi = ifp->if_softc;
3173 	struct i40e_hw	*hw = vsi->hw;
3174 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3175 
3176 	if (ifp->if_softc !=  arg)
3177 		return;
3178 
3179 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3180 		return;
3181 
3182 	IXL_PF_LOCK(pf);
3183 	--vsi->num_vlans;
3184 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3185 	IXL_PF_UNLOCK(pf);
3186 }
3187 
3188 /*
3189 ** This routine updates vlan filters; called by init,
3190 ** it scans the filter table and then updates the hw
3191 ** after a soft reset.
3192 */
3193 static void
3194 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3195 {
3196 	struct ixl_mac_filter	*f;
3197 	int			cnt = 0, flags;
3198 
3199 	if (vsi->num_vlans == 0)
3200 		return;
3201 	/*
3202 	** Scan the filter list for vlan entries,
3203 	** mark them for addition and then call
3204 	** for the AQ update.
3205 	*/
3206 	SLIST_FOREACH(f, &vsi->ftl, next) {
3207 		if (f->flags & IXL_FILTER_VLAN) {
3208 			f->flags |=
3209 			    (IXL_FILTER_ADD |
3210 			    IXL_FILTER_USED);
3211 			cnt++;
3212 		}
3213 	}
3214 	if (cnt == 0) {
3215 		printf("setup vlan: no filters found!\n");
3216 		return;
3217 	}
3218 	flags = IXL_FILTER_VLAN;
3219 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3220 	ixl_add_hw_filters(vsi, flags, cnt);
3221 	return;
3222 }
3223 
3224 /*
3225 ** Initialize filter list and add filters that the hardware
3226 ** needs to know about.
3227 */
3228 static void
3229 ixl_init_filters(struct ixl_vsi *vsi)
3230 {
3231 	/* Add broadcast address */
3232 	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3233 	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3234 }
3235 
3236 /*
3237 ** This routine adds multicast filters
3238 */
3239 static void
3240 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3241 {
3242 	struct ixl_mac_filter *f;
3243 
3244 	/* Does one already exist */
3245 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3246 	if (f != NULL)
3247 		return;
3248 
3249 	f = ixl_get_filter(vsi);
3250 	if (f == NULL) {
3251 		printf("WARNING: no filter available!!\n");
3252 		return;
3253 	}
3254 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3255 	f->vlan = IXL_VLAN_ANY;
3256 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3257 	    | IXL_FILTER_MC);
3258 
3259 	return;
3260 }
3261 
3262 /*
3263 ** This routine adds macvlan filters
3264 */
3265 static void
3266 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3267 {
3268 	struct ixl_mac_filter	*f, *tmp;
3269 	device_t		dev = vsi->dev;
3270 
3271 	DEBUGOUT("ixl_add_filter: begin");
3272 
3273 	/* Does one already exist */
3274 	f = ixl_find_filter(vsi, macaddr, vlan);
3275 	if (f != NULL)
3276 		return;
3277 	/*
3278 	** Is this the first vlan being registered? If so we
3279 	** need to remove the ANY filter that indicates we are
3280 	** not in a vlan, and replace that with a 0 filter.
3281 	*/
3282 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3283 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3284 		if (tmp != NULL) {
3285 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3286 			ixl_add_filter(vsi, macaddr, 0);
3287 		}
3288 	}
3289 
3290 	f = ixl_get_filter(vsi);
3291 	if (f == NULL) {
3292 		device_printf(dev, "WARNING: no filter available!!\n");
3293 		return;
3294 	}
3295 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3296 	f->vlan = vlan;
3297 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3298 	if (f->vlan != IXL_VLAN_ANY)
3299 		f->flags |= IXL_FILTER_VLAN;
3300 
3301 	ixl_add_hw_filters(vsi, f->flags, 1);
3302 	return;
3303 }
3304 
3305 static void
3306 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3307 {
3308 	struct ixl_mac_filter *f;
3309 
3310 	f = ixl_find_filter(vsi, macaddr, vlan);
3311 	if (f == NULL)
3312 		return;
3313 
3314 	f->flags |= IXL_FILTER_DEL;
3315 	ixl_del_hw_filters(vsi, 1);
3316 
3317 	/* Check if this is the last vlan removal */
3318 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3319 		/* Switch back to a non-vlan filter */
3320 		ixl_del_filter(vsi, macaddr, 0);
3321 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3322 	}
3323 	return;
3324 }
3325 
3326 /*
3327 ** Find the filter with both matching mac addr and vlan id
3328 */
3329 static struct ixl_mac_filter *
3330 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3331 {
3332 	struct ixl_mac_filter	*f;
3333 	bool			match = FALSE;
3334 
3335 	SLIST_FOREACH(f, &vsi->ftl, next) {
3336 		if (!cmp_etheraddr(f->macaddr, macaddr))
3337 			continue;
3338 		if (f->vlan == vlan) {
3339 			match = TRUE;
3340 			break;
3341 		}
3342 	}
3343 
3344 	if (!match)
3345 		f = NULL;
3346 	return (f);
3347 }
3348 
3349 /*
3350 ** This routine takes additions to the vsi filter
3351 ** table and creates an Admin Queue call to create
3352 ** the filters in the hardware.
3353 */
3354 static void
3355 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3356 {
3357 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3358 	struct ixl_mac_filter	*f;
3359 	struct i40e_hw	*hw = vsi->hw;
3360 	device_t	dev = vsi->dev;
3361 	int		err, j = 0;
3362 
3363 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3364 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3365 	if (a == NULL) {
3366 		device_printf(dev, "add hw filter failed to get memory\n");
3367 		return;
3368 	}
3369 
3370 	/*
3371 	** Scan the filter list; each time we find a filter
3372 	** whose flags match, we add it to the admin queue
3373 	** array and turn off the add bit.
3374 	*/
3375 	SLIST_FOREACH(f, &vsi->ftl, next) {
3376 		if (f->flags == flags) {
3377 			b = &a[j]; // a pox on fvl long names :)
3378 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3379 			b->vlan_tag =
3380 			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3381 			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3382 			f->flags &= ~IXL_FILTER_ADD;
3383 			j++;
3384 		}
3385 		if (j == cnt)
3386 			break;
3387 	}
3388 	if (j > 0) {
3389 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3390 		if (err)
3391 			device_printf(dev, "aq_add_macvlan failure %d\n",
3392 			    hw->aq.asq_last_status);
3393 		else
3394 			vsi->hw_filters_add += j;
3395 	}
3396 	free(a, M_DEVBUF);
3397 	return;
3398 }
3399 
3400 /*
3401 ** This routine takes removals in the vsi filter
3402 ** table and creates an Admin Queue call to delete
3403 ** the filters in the hardware.
3404 */
3405 static void
3406 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3407 {
3408 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3409 	struct i40e_hw		*hw = vsi->hw;
3410 	device_t		dev = vsi->dev;
3411 	struct ixl_mac_filter	*f, *f_temp;
3412 	int			err, j = 0;
3413 
3414 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3415 
3416 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3417 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3418 	if (d == NULL) {
3419 		printf("del hw filter failed to get memory\n");
3420 		return;
3421 	}
3422 
3423 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3424 		if (f->flags & IXL_FILTER_DEL) {
3425 			e = &d[j]; // a pox on fvl long names :)
3426 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3427 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3428 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3429 			/* delete entry from vsi list */
3430 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3431 			free(f, M_DEVBUF);
3432 			j++;
3433 		}
3434 		if (j == cnt)
3435 			break;
3436 	}
3437 	if (j > 0) {
3438 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3439 		/* NOTE: returns ENOENT every time but seems to work fine,
3440 		   so we'll ignore that specific error. */
3441 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3442 			int sc = 0;
3443 			for (int i = 0; i < j; i++)
3444 				sc += (!d[i].error_code);
3445 			vsi->hw_filters_del += sc;
3446 			device_printf(dev,
3447 			    "Failed to remove %d/%d filters, aq error %d\n",
3448 			    j - sc, j, hw->aq.asq_last_status);
3449 		} else
3450 			vsi->hw_filters_del += j;
3451 	}
3452 	free(d, M_DEVBUF);
3453 
3454 	DEBUGOUT("ixl_del_hw_filters: end\n");
3455 	return;
3456 }
3457 
3458 
3459 static void
3460 ixl_enable_rings(struct ixl_vsi *vsi)
3461 {
3462 	struct i40e_hw	*hw = vsi->hw;
3463 	u32		reg;
3464 
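	/*
	** Each enable below is polled for up to ~100ms (10 reads at
	** 10ms intervals) before the queue is reported as failed.
	*/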
3465 	for (int i = 0; i < vsi->num_queues; i++) {
3466 		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3467 
3468 		reg = rd32(hw, I40E_QTX_ENA(i));
3469 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3470 		    I40E_QTX_ENA_QENA_STAT_MASK;
3471 		wr32(hw, I40E_QTX_ENA(i), reg);
3472 		/* Verify the enable took */
3473 		for (int j = 0; j < 10; j++) {
3474 			reg = rd32(hw, I40E_QTX_ENA(i));
3475 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3476 				break;
3477 			i40e_msec_delay(10);
3478 		}
3479 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3480 			device_printf(vsi->dev, "TX queue %d disabled!\n", i);
3481 
3482 		reg = rd32(hw, I40E_QRX_ENA(i));
3483 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3484 		    I40E_QRX_ENA_QENA_STAT_MASK;
3485 		wr32(hw, I40E_QRX_ENA(i), reg);
3486 		/* Verify the enable took */
3487 		for (int j = 0; j < 10; j++) {
3488 			reg = rd32(hw, I40E_QRX_ENA(i));
3489 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3490 				break;
3491 			i40e_msec_delay(10);
3492 		}
3493 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3494 			device_printf(vsi->dev, "RX queue %d disabled!\n", i);
3495 	}
3496 }
3497 
3498 static void
3499 ixl_disable_rings(struct ixl_vsi *vsi)
3500 {
3501 	struct i40e_hw	*hw = vsi->hw;
3502 	u32		reg;
3503 
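	/*
	** i40e_pre_tx_queue_cfg() asks the queue to stop first; the
	** 500us delay gives in-flight descriptors a chance to drain
	** before the enable request bit is cleared below.
	*/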
3504 	for (int i = 0; i < vsi->num_queues; i++) {
3505 		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3506 		i40e_usec_delay(500);
3507 
3508 		reg = rd32(hw, I40E_QTX_ENA(i));
3509 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3510 		wr32(hw, I40E_QTX_ENA(i), reg);
3511 		/* Verify the disable took */
3512 		for (int j = 0; j < 10; j++) {
3513 			reg = rd32(hw, I40E_QTX_ENA(i));
3514 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3515 				break;
3516 			i40e_msec_delay(10);
3517 		}
3518 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3519 			device_printf(vsi->dev, "TX queue %d still enabled!\n", i);
3520 
3521 		reg = rd32(hw, I40E_QRX_ENA(i));
3522 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3523 		wr32(hw, I40E_QRX_ENA(i), reg);
3524 		/* Verify the disable took */
3525 		for (int j = 0; j < 10; j++) {
3526 			reg = rd32(hw, I40E_QRX_ENA(i));
3527 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3528 				break;
3529 			i40e_msec_delay(10);
3530 		}
3531 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3532 			device_printf(vsi->dev, "RX queue %d still enabled!\n", i);
3533 	}
3534 }
3535 
3536 /**
3537  * ixl_handle_mdd_event
3538  *
3539  * Called from the interrupt handler to identify possibly malicious VFs
3540  * (it also detects events caused by the PF itself).
3541  **/
3542 static void
ixl_handle_mdd_event(struct ixl_pf *pf)
3543 {
3544 	struct i40e_hw *hw = &pf->hw;
3545 	device_t dev = pf->dev;
3546 	bool mdd_detected = false;
3547 	bool pf_mdd_detected = false;
3548 	u32 reg;
3549 
3550 	/* find what triggered the MDD event */
3551 	reg = rd32(hw, I40E_GL_MDET_TX);
3552 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3553 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3554 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3555 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3556 				I40E_GL_MDET_TX_EVENT_SHIFT;
3557 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3558 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3559 		device_printf(dev,
3560 			 "Malicious Driver Detection event 0x%02x"
3561 			 " on TX queue %d pf number 0x%02x\n",
3562 			 event, queue, pf_num);
3563 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3564 		mdd_detected = true;
3565 	}
3566 	reg = rd32(hw, I40E_GL_MDET_RX);
3567 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3568 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3569 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3570 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3571 				I40E_GL_MDET_RX_EVENT_SHIFT;
3572 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3573 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3574 		device_printf(dev,
3575 			 "Malicious Driver Detection event 0x%02x"
3576 			 " on RX queue %d of function 0x%02x\n",
3577 			 event, queue, func);
3578 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3579 		mdd_detected = true;
3580 	}
3581 
3582 	if (mdd_detected) {
3583 		reg = rd32(hw, I40E_PF_MDET_TX);
3584 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3585 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3586 			device_printf(dev,
3587 				 "MDD TX event is for this function 0x%08x\n",
3588 				 reg);
3589 			pf_mdd_detected = true;
3590 		}
3591 		reg = rd32(hw, I40E_PF_MDET_RX);
3592 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3593 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3594 			device_printf(dev,
3595 				 "MDD RX event is for this function 0x%08x\n",
3596 				 reg);
3597 			pf_mdd_detected = true;
3598 		}
3599 	}
3600 
3601 	/* re-enable mdd interrupt cause */
3602 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3603 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3604 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3605 	ixl_flush(hw);
3606 }
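
/*
** The field extraction above uses the usual mask-and-shift pattern
** (a generic illustration, not the actual register layout):
**
**	field = (reg & FIELD_MASK) >> FIELD_SHIFT;
**
** e.g. a mask of 0x0000ff00 with a shift of 8 recovers 0x42 from a
** raw register value of 0x00004200.  Writing all ones back to the
** GL_MDET registers clears the latched event for the next capture.
*/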
3607 
3608 static void
3609 ixl_enable_intr(struct ixl_vsi *vsi)
3610 {
3611 	struct i40e_hw		*hw = vsi->hw;
3612 	struct ixl_queue	*que = vsi->queues;
3613 
3614 	if (ixl_enable_msix) {
3615 		ixl_enable_adminq(hw);
3616 		for (int i = 0; i < vsi->num_queues; i++, que++)
3617 			ixl_enable_queue(hw, que->me);
3618 	} else
3619 		ixl_enable_legacy(hw);
3620 }
3621 
3622 static void
3623 ixl_disable_intr(struct ixl_vsi *vsi)
3624 {
3625 	struct i40e_hw		*hw = vsi->hw;
3626 	struct ixl_queue	*que = vsi->queues;
3627 
3628 	if (ixl_enable_msix) {
3629 		ixl_disable_adminq(hw);
3630 		for (int i = 0; i < vsi->num_queues; i++, que++)
3631 			ixl_disable_queue(hw, que->me);
3632 	} else
3633 		ixl_disable_legacy(hw);
3634 }
3635 
3636 static void
3637 ixl_enable_adminq(struct i40e_hw *hw)
3638 {
3639 	u32		reg;
3640 
3641 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3642 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3643 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3644 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3645 	ixl_flush(hw);
3646 	return;
3647 }
3648 
3649 static void
3650 ixl_disable_adminq(struct i40e_hw *hw)
3651 {
3652 	u32		reg;
3653 
3654 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3655 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3656 
3657 	return;
3658 }
3659 
3660 static void
3661 ixl_enable_queue(struct i40e_hw *hw, int id)
3662 {
3663 	u32		reg;
3664 
3665 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3666 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3667 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3668 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3669 }
3670 
3671 static void
3672 ixl_disable_queue(struct i40e_hw *hw, int id)
3673 {
3674 	u32		reg;
3675 
3676 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3677 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3678 
3679 	return;
3680 }
3681 
3682 static void
3683 ixl_enable_legacy(struct i40e_hw *hw)
3684 {
3685 	u32		reg;
3686 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3687 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3688 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3689 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3690 }
3691 
3692 static void
3693 ixl_disable_legacy(struct i40e_hw *hw)
3694 {
3695 	u32		reg;
3696 
3697 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3698 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3699 
3700 	return;
3701 }
3702 
3703 static void
3704 ixl_update_stats_counters(struct ixl_pf *pf)
3705 {
3706 	struct i40e_hw	*hw = &pf->hw;
3707 	struct ixl_vsi *vsi = &pf->vsi;
3708 
3709 	struct i40e_hw_port_stats *nsd = &pf->stats;
3710 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3711 
3712 	/* Update hw stats */
3713 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3714 			   pf->stat_offsets_loaded,
3715 			   &osd->crc_errors, &nsd->crc_errors);
3716 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3717 			   pf->stat_offsets_loaded,
3718 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3719 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3720 			   I40E_GLPRT_GORCL(hw->port),
3721 			   pf->stat_offsets_loaded,
3722 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3723 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3724 			   I40E_GLPRT_GOTCL(hw->port),
3725 			   pf->stat_offsets_loaded,
3726 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3727 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3728 			   pf->stat_offsets_loaded,
3729 			   &osd->eth.rx_discards,
3730 			   &nsd->eth.rx_discards);
3731 	ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
3732 			   pf->stat_offsets_loaded,
3733 			   &osd->eth.tx_discards,
3734 			   &nsd->eth.tx_discards);
3735 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3736 			   I40E_GLPRT_UPRCL(hw->port),
3737 			   pf->stat_offsets_loaded,
3738 			   &osd->eth.rx_unicast,
3739 			   &nsd->eth.rx_unicast);
3740 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3741 			   I40E_GLPRT_UPTCL(hw->port),
3742 			   pf->stat_offsets_loaded,
3743 			   &osd->eth.tx_unicast,
3744 			   &nsd->eth.tx_unicast);
3745 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3746 			   I40E_GLPRT_MPRCL(hw->port),
3747 			   pf->stat_offsets_loaded,
3748 			   &osd->eth.rx_multicast,
3749 			   &nsd->eth.rx_multicast);
3750 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3751 			   I40E_GLPRT_MPTCL(hw->port),
3752 			   pf->stat_offsets_loaded,
3753 			   &osd->eth.tx_multicast,
3754 			   &nsd->eth.tx_multicast);
3755 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3756 			   I40E_GLPRT_BPRCL(hw->port),
3757 			   pf->stat_offsets_loaded,
3758 			   &osd->eth.rx_broadcast,
3759 			   &nsd->eth.rx_broadcast);
3760 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3761 			   I40E_GLPRT_BPTCL(hw->port),
3762 			   pf->stat_offsets_loaded,
3763 			   &osd->eth.tx_broadcast,
3764 			   &nsd->eth.tx_broadcast);
3765 
3766 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3767 			   pf->stat_offsets_loaded,
3768 			   &osd->tx_dropped_link_down,
3769 			   &nsd->tx_dropped_link_down);
3770 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3771 			   pf->stat_offsets_loaded,
3772 			   &osd->mac_local_faults,
3773 			   &nsd->mac_local_faults);
3774 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3775 			   pf->stat_offsets_loaded,
3776 			   &osd->mac_remote_faults,
3777 			   &nsd->mac_remote_faults);
3778 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3779 			   pf->stat_offsets_loaded,
3780 			   &osd->rx_length_errors,
3781 			   &nsd->rx_length_errors);
3782 
3783 	/* Flow control (LFC) stats */
3784 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3785 			   pf->stat_offsets_loaded,
3786 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3787 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3788 			   pf->stat_offsets_loaded,
3789 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3790 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3791 			   pf->stat_offsets_loaded,
3792 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3793 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3794 			   pf->stat_offsets_loaded,
3795 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3796 
3797 	/* Priority flow control stats */
3798 #if 0
3799 	for (int i = 0; i < 8; i++) {
3800 		ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3801 				   pf->stat_offsets_loaded,
3802 				   &osd->priority_xon_rx[i],
3803 				   &nsd->priority_xon_rx[i]);
3804 		ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3805 				   pf->stat_offsets_loaded,
3806 				   &osd->priority_xon_tx[i],
3807 				   &nsd->priority_xon_tx[i]);
3808 		ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3809 				   pf->stat_offsets_loaded,
3810 				   &osd->priority_xoff_tx[i],
3811 				   &nsd->priority_xoff_tx[i]);
3812 		ixl_stat_update32(hw,
3813 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3814 				   pf->stat_offsets_loaded,
3815 				   &osd->priority_xon_2_xoff[i],
3816 				   &nsd->priority_xon_2_xoff[i]);
3817 	}
3818 #endif
3819 
3820 	/* Packet size stats rx */
3821 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3822 			   I40E_GLPRT_PRC64L(hw->port),
3823 			   pf->stat_offsets_loaded,
3824 			   &osd->rx_size_64, &nsd->rx_size_64);
3825 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3826 			   I40E_GLPRT_PRC127L(hw->port),
3827 			   pf->stat_offsets_loaded,
3828 			   &osd->rx_size_127, &nsd->rx_size_127);
3829 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3830 			   I40E_GLPRT_PRC255L(hw->port),
3831 			   pf->stat_offsets_loaded,
3832 			   &osd->rx_size_255, &nsd->rx_size_255);
3833 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3834 			   I40E_GLPRT_PRC511L(hw->port),
3835 			   pf->stat_offsets_loaded,
3836 			   &osd->rx_size_511, &nsd->rx_size_511);
3837 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3838 			   I40E_GLPRT_PRC1023L(hw->port),
3839 			   pf->stat_offsets_loaded,
3840 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3841 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3842 			   I40E_GLPRT_PRC1522L(hw->port),
3843 			   pf->stat_offsets_loaded,
3844 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3845 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3846 			   I40E_GLPRT_PRC9522L(hw->port),
3847 			   pf->stat_offsets_loaded,
3848 			   &osd->rx_size_big, &nsd->rx_size_big);
3849 
3850 	/* Packet size stats tx */
3851 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3852 			   I40E_GLPRT_PTC64L(hw->port),
3853 			   pf->stat_offsets_loaded,
3854 			   &osd->tx_size_64, &nsd->tx_size_64);
3855 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3856 			   I40E_GLPRT_PTC127L(hw->port),
3857 			   pf->stat_offsets_loaded,
3858 			   &osd->tx_size_127, &nsd->tx_size_127);
3859 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3860 			   I40E_GLPRT_PTC255L(hw->port),
3861 			   pf->stat_offsets_loaded,
3862 			   &osd->tx_size_255, &nsd->tx_size_255);
3863 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3864 			   I40E_GLPRT_PTC511L(hw->port),
3865 			   pf->stat_offsets_loaded,
3866 			   &osd->tx_size_511, &nsd->tx_size_511);
3867 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3868 			   I40E_GLPRT_PTC1023L(hw->port),
3869 			   pf->stat_offsets_loaded,
3870 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3871 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3872 			   I40E_GLPRT_PTC1522L(hw->port),
3873 			   pf->stat_offsets_loaded,
3874 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3875 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3876 			   I40E_GLPRT_PTC9522L(hw->port),
3877 			   pf->stat_offsets_loaded,
3878 			   &osd->tx_size_big, &nsd->tx_size_big);
3879 
3880 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3881 			   pf->stat_offsets_loaded,
3882 			   &osd->rx_undersize, &nsd->rx_undersize);
3883 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3884 			   pf->stat_offsets_loaded,
3885 			   &osd->rx_fragments, &nsd->rx_fragments);
3886 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3887 			   pf->stat_offsets_loaded,
3888 			   &osd->rx_oversize, &nsd->rx_oversize);
3889 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3890 			   pf->stat_offsets_loaded,
3891 			   &osd->rx_jabber, &nsd->rx_jabber);
3892 	pf->stat_offsets_loaded = true;
3893 	/* End hw stats */
3894 
3895 	/* Update vsi stats */
3896 	ixl_update_eth_stats(vsi);
3897 
3898 	/* OS statistics */
3899 	// ERJ - these are per-port, update all vsis?
3900 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
3901 }
3902 
3903 /*
3904 ** Task handler for MSIX Admin Queue interrupts;
3905 ** runs outside interrupt context since it may sleep.
3906 */
3907 static void
3908 ixl_do_adminq(void *context, int pending)
3909 {
3910 	struct ixl_pf			*pf = context;
3911 	struct i40e_hw			*hw = &pf->hw;
3912 	struct ixl_vsi			*vsi = &pf->vsi;
3913 	struct i40e_arq_event_info	event;
3914 	i40e_status			ret;
3915 	u32				reg, loop = 0;
3916 	u16				opcode, result;
3917 
3918 	event.msg_len = IXL_AQ_BUF_SZ;
3919 	event.msg_buf = malloc(event.msg_len,
3920 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3921 	if (!event.msg_buf) {
3922 		device_printf(pf->dev, "Unable to allocate adminq memory\n");
3923 		return;
3924 	}
3925 
3926 	/* clean and process any events */
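	/*
	** i40e_clean_arq_element() reports the number of events still
	** pending in 'result'; loop until the ring drains or the
	** IXL_ADM_LIMIT bound is reached, so a burst of events cannot
	** keep this task running indefinitely.
	*/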
3927 	do {
3928 		ret = i40e_clean_arq_element(hw, &event, &result);
3929 		if (ret)
3930 			break;
3931 		opcode = LE16_TO_CPU(event.desc.opcode);
3932 		switch (opcode) {
3933 		case i40e_aqc_opc_get_link_status:
3934 			vsi->link_up = ixl_config_link(hw);
3935 			ixl_update_link_status(pf);
3936 			break;
3937 		case i40e_aqc_opc_send_msg_to_pf:
3938 			/* process pf/vf communication here */
3939 			break;
3940 		case i40e_aqc_opc_event_lan_overflow:
3941 			break;
3942 		default:
3943 #ifdef IXL_DEBUG
3944 			printf("AdminQ unknown event %x\n", opcode);
3945 #endif
3946 			break;
3947 		}
3948 
3949 	} while (result && (loop++ < IXL_ADM_LIMIT));
3950 
3951 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3952 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3953 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3954 	free(event.msg_buf, M_DEVBUF);
3955 
3956 	if (pf->msix > 1)
3957 		ixl_enable_adminq(&pf->hw);
3958 	else
3959 		ixl_enable_intr(vsi);
3960 }
3961 
3962 static int
3963 ixl_debug_info(SYSCTL_HANDLER_ARGS)
3964 {
3965 	struct ixl_pf	*pf;
3966 	int		error, input = 0;
3967 
3968 	error = sysctl_handle_int(oidp, &input, 0, req);
3969 
3970 	if (error || !req->newptr)
3971 		return (error);
3972 
3973 	if (input == 1) {
3974 		pf = (struct ixl_pf *)arg1;
3975 		ixl_print_debug_info(pf);
3976 	}
3977 
3978 	return (error);
3979 }
3980 
3981 static void
3982 ixl_print_debug_info(struct ixl_pf *pf)
3983 {
3984 	struct i40e_hw		*hw = &pf->hw;
3985 	struct ixl_vsi		*vsi = &pf->vsi;
3986 	struct ixl_queue	*que = vsi->queues;
3987 	struct rx_ring		*rxr = &que->rxr;
3988 	struct tx_ring		*txr = &que->txr;
3989 	u32			reg;
3990 
3992 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
3993 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
3994 	printf("RX next check = %x\n", rxr->next_check);
3995 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
3996 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
3997 	printf("TX desc avail = %x\n", txr->avail);
3998 
3999 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4000 	printf("RX Bytes = %x\n", reg);
4001 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4002 	printf("Port RX Bytes = %x\n", reg);
4003 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4004 	printf("RX discard = %x\n", reg);
4005 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4006 	printf("Port RX discard = %x\n", reg);
4007 
4008 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4009 	printf("TX errors = %x\n", reg);
4010 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4011 	printf("TX Bytes = %x\n", reg);
4012 
4013 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4014 	printf("RX undersize = %x\n", reg);
4015 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4016 	printf("RX fragments = %x\n", reg);
4017 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4018 	printf("RX oversize = %x\n", reg);
4019 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4020 	printf("RX length error = %x\n", reg);
4021 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4022 	printf("mac remote fault = %x\n", reg);
4023 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4024 	printf("mac local fault = %x\n", reg);
4025 }
4026 
4027 /**
4028  * Update VSI-specific ethernet statistics counters.
4029  **/
4030 void
ixl_update_eth_stats(struct ixl_vsi *vsi)
4031 {
4032 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4033 	struct i40e_hw *hw = &pf->hw;
4034 	struct i40e_eth_stats *es;
4035 	struct i40e_eth_stats *oes;
4036 	int i;
4037 	uint64_t tx_discards;
4038 	struct i40e_hw_port_stats *nsd;
4039 	u16 stat_idx = vsi->info.stat_counter_idx;
4040 
4041 	es = &vsi->eth_stats;
4042 	oes = &vsi->eth_stats_offsets;
4043 	nsd = &pf->stats;
4044 
4045 	/* Gather up the stats that the hw collects */
4046 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4047 			   vsi->stat_offsets_loaded,
4048 			   &oes->tx_errors, &es->tx_errors);
4049 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4050 			   vsi->stat_offsets_loaded,
4051 			   &oes->rx_discards, &es->rx_discards);
4052 
4053 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4054 			   I40E_GLV_GORCL(stat_idx),
4055 			   vsi->stat_offsets_loaded,
4056 			   &oes->rx_bytes, &es->rx_bytes);
4057 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4058 			   I40E_GLV_UPRCL(stat_idx),
4059 			   vsi->stat_offsets_loaded,
4060 			   &oes->rx_unicast, &es->rx_unicast);
4061 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4062 			   I40E_GLV_MPRCL(stat_idx),
4063 			   vsi->stat_offsets_loaded,
4064 			   &oes->rx_multicast, &es->rx_multicast);
4065 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4066 			   I40E_GLV_BPRCL(stat_idx),
4067 			   vsi->stat_offsets_loaded,
4068 			   &oes->rx_broadcast, &es->rx_broadcast);
4069 
4070 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4071 			   I40E_GLV_GOTCL(stat_idx),
4072 			   vsi->stat_offsets_loaded,
4073 			   &oes->tx_bytes, &es->tx_bytes);
4074 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4075 			   I40E_GLV_UPTCL(stat_idx),
4076 			   vsi->stat_offsets_loaded,
4077 			   &oes->tx_unicast, &es->tx_unicast);
4078 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4079 			   I40E_GLV_MPTCL(stat_idx),
4080 			   vsi->stat_offsets_loaded,
4081 			   &oes->tx_multicast, &es->tx_multicast);
4082 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4083 			   I40E_GLV_BPTCL(stat_idx),
4084 			   vsi->stat_offsets_loaded,
4085 			   &oes->tx_broadcast, &es->tx_broadcast);
4086 	vsi->stat_offsets_loaded = true;
4087 
4088 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4089 	for (i = 0; i < vsi->num_queues; i++)
4090 		tx_discards += vsi->queues[i].txr.br->br_drops;
4091 
4092 	/* Update ifnet stats */
4093 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4094 	                   es->rx_multicast +
4095 			   es->rx_broadcast);
4096 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4097 	                   es->tx_multicast +
4098 			   es->tx_broadcast);
4099 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4100 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4101 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4102 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4103 
4104 	IXL_SET_OERRORS(vsi, es->tx_errors);
4105 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4106 	IXL_SET_OQDROPS(vsi, tx_discards);
4107 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4108 	IXL_SET_COLLISIONS(vsi, 0);
4109 }
4110 
4111 /**
4112  * Reset all of the stats for the given pf
4113  **/
4114 void
ixl_pf_reset_stats(struct ixl_pf *pf)
4115 {
4116 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4117 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4118 	pf->stat_offsets_loaded = false;
4119 }
4120 
4121 /**
4122  * Resets all stats of the given vsi
4123  **/
4124 void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4125 {
4126 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4127 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4128 	vsi->stat_offsets_loaded = false;
4129 }
4130 
4131 /**
4132  * Read and update a 48 bit stat from the hw
4133  *
4134  * Since the device stats are not reset at PFReset, they likely will not
4135  * be zeroed when the driver starts.  We'll save the first values read
4136  * and use them as offsets to be subtracted from the raw values in order
4137  * to report stats that count from zero.
4138  **/
4139 static void
4140 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4141 	bool offset_loaded, u64 *offset, u64 *stat)
4142 {
4143 	u64 new_data;
4144 
4145 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4146 	new_data = rd64(hw, loreg);
4147 #else
4148 	/*
4149 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4150 	 * 10 don't support 8 byte bus reads/writes.
4151 	 */
4152 	new_data = rd32(hw, loreg);
4153 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4154 #endif
4155 
4156 	if (!offset_loaded)
4157 		*offset = new_data;
4158 	if (new_data >= *offset)
4159 		*stat = new_data - *offset;
4160 	else
4161 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4162 	*stat &= 0xFFFFFFFFFFFFULL;
4163 }
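
/*
** Worked example of the rollover handling above: if the first read
** saved an offset of 0xFFFFFFFFFFF0 and a later read returns
** 0x000000000020, then new_data < *offset, so the reported value is
** (0x20 + (1 << 48)) - 0xFFFFFFFFFFF0 = 0x30, masked to 48 bits.
*/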
4164 
4165 /**
4166  * Read and update a 32 bit stat from the hw
4167  **/
4168 static void
4169 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4170 	bool offset_loaded, u64 *offset, u64 *stat)
4171 {
4172 	u32 new_data;
4173 
4174 	new_data = rd32(hw, reg);
4175 	if (!offset_loaded)
4176 		*offset = new_data;
4177 	if (new_data >= *offset)
4178 		*stat = (u32)(new_data - *offset);
4179 	else
4180 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4181 }
4182 
4183 /*
4184 ** Set flow control using sysctl:
4185 ** 	0 - off
4186 **	1 - rx pause
4187 **	2 - tx pause
4188 **	3 - full
4189 */
4190 static int
4191 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4192 {
4193 	/*
4194 	 * TODO: ensure flow control is disabled if
4195 	 * priority flow control is enabled
4196 	 *
4197 	 * TODO: ensure tx CRC by hardware should be enabled
4198 	 * if tx flow control is enabled.
4199 	 */
4200 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4201 	struct i40e_hw *hw = &pf->hw;
4202 	device_t dev = pf->dev;
4203 	int requested_fc = 0, error = 0;
4204 	enum i40e_status_code aq_error = 0;
4205 	u8 fc_aq_err = 0;
4206 
4207 	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4208 	if (aq_error) {
4209 		device_printf(dev,
4210 		    "%s: Error retrieving link info from aq, %d\n",
4211 		    __func__, aq_error);
4212 		return (EAGAIN);
4213 	}
4214 
4215 	/* Read in new mode */
4216 	requested_fc = hw->fc.current_mode;
4217 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4218 	if ((error) || (req->newptr == NULL))
4219 		return (error);
4220 	if (requested_fc < 0 || requested_fc > 3) {
4221 		device_printf(dev,
4222 		    "Invalid fc mode; valid modes are 0 through 3\n");
4223 		return (EINVAL);
4224 	}
4225 
4226 	/*
4227 	** Changing flow control mode currently does not work on
4228 	** 40GBASE-CR4 PHYs
4229 	*/
4230 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4231 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4232 		device_printf(dev, "Changing flow control mode unsupported"
4233 		    " on 40GBase-CR4 media.\n");
4234 		return (ENODEV);
4235 	}
4236 
4237 	/* Set fc ability for port */
4238 	hw->fc.requested_mode = requested_fc;
4239 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4240 	if (aq_error) {
4241 		device_printf(dev,
4242 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4243 		    __func__, aq_error, fc_aq_err);
4244 		return (EAGAIN);
4245 	}
4246 
4247 	if (hw->fc.current_mode != hw->fc.requested_mode) {
4248 		device_printf(dev, "%s: FC set failure:\n", __func__);
4249 		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4250 		    __func__,
4251 		    ixl_fc_string[hw->fc.current_mode],
4252 		    ixl_fc_string[hw->fc.requested_mode]);
4253 	}
4254 
4255 	return (0);
4256 }
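
/*
** Example usage from userland (a sketch; the exact sysctl OID
** depends on the unit number and how the driver registers the node):
**
**	# sysctl dev.ixl.0.fc=3		(request full rx/tx pause)
**	# sysctl dev.ixl.0.fc=0		(disable flow control)
*/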
4257 
4258 static int
4259 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4260 {
4261 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4262 	struct i40e_hw *hw = &pf->hw;
4263 	int error = 0, index = 0;
4264 
4265 	char *speeds[] = {
4266 		"Unknown",
4267 		"100M",
4268 		"1G",
4269 		"10G",
4270 		"40G",
4271 		"20G"
4272 	};
4273 
4274 	ixl_update_link_status(pf);
4275 
4276 	switch (hw->phy.link_info.link_speed) {
4277 	case I40E_LINK_SPEED_100MB:
4278 		index = 1;
4279 		break;
4280 	case I40E_LINK_SPEED_1GB:
4281 		index = 2;
4282 		break;
4283 	case I40E_LINK_SPEED_10GB:
4284 		index = 3;
4285 		break;
4286 	case I40E_LINK_SPEED_40GB:
4287 		index = 4;
4288 		break;
4289 	case I40E_LINK_SPEED_20GB:
4290 		index = 5;
4291 		break;
4292 	case I40E_LINK_SPEED_UNKNOWN:
4293 	default:
4294 		index = 0;
4295 		break;
4296 	}
4297 
4298 	error = sysctl_handle_string(oidp, speeds[index],
4299 	    strlen(speeds[index]), req);
4300 	return (error);
4301 }
4302 
4303 /*
4304 ** Control link advertise speed:
4305 **	Flags:
4306 **	0x1 - advertise 100 Mb
4307 **	0x2 - advertise 1G
4308 **	0x4 - advertise 10G
4309 **
4310 ** Does not work on 40G devices.
4311 */
4312 static int
4313 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4314 {
4315 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4316 	struct i40e_hw *hw = &pf->hw;
4317 	device_t dev = pf->dev;
4318 	struct i40e_aq_get_phy_abilities_resp abilities;
4319 	struct i40e_aq_set_phy_config config;
4320 	int requested_ls = 0;
4321 	enum i40e_status_code aq_error = 0;
4322 	int error = 0;
4323 
4324 	/*
4325 	** FW doesn't support changing advertised speed
4326 	** for 40G devices; speed is always 40G.
4327 	*/
4328 	if (i40e_is_40G_device(hw->device_id))
4329 		return (ENODEV);
4330 
4331 	/* Read in new mode */
4332 	requested_ls = pf->advertised_speed;
4333 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4334 	if ((error) || (req->newptr == NULL))
4335 		return (error);
4336 	if (requested_ls < 1 || requested_ls > 7) {
4337 		device_printf(dev,
4338 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4339 		return (EINVAL);
4340 	}
4341 
4342 	/* Exit if no change */
4343 	if (pf->advertised_speed == requested_ls)
4344 		return (0);
4345 
4346 	/* Get current capability information */
4347 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4348 	if (aq_error) {
4349 		device_printf(dev, "%s: Error getting phy capabilities %d,"
4350 		    " aq error: %d\n", __func__, aq_error,
4351 		    hw->aq.asq_last_status);
4352 		return (EAGAIN);
4353 	}
4354 
4355 	/* Prepare new config */
4356 	bzero(&config, sizeof(config));
4357 	config.phy_type = abilities.phy_type;
4358 	config.abilities = abilities.abilities
4359 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4360 	config.eee_capability = abilities.eee_capability;
4361 	config.eeer = abilities.eeer_val;
4362 	config.low_power_ctrl = abilities.d3_lpan;
4363 	/* Translate into aq cmd link_speed */
4364 	if (requested_ls & 0x4)
4365 		config.link_speed |= I40E_LINK_SPEED_10GB;
4366 	if (requested_ls & 0x2)
4367 		config.link_speed |= I40E_LINK_SPEED_1GB;
4368 	if (requested_ls & 0x1)
4369 		config.link_speed |= I40E_LINK_SPEED_100MB;
4370 
4371 	/* Do aq command & restart link */
4372 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4373 	if (aq_error) {
4374 		device_printf(dev, "%s: Error setting new phy config %d,"
4375 		    " aq error: %d\n", __func__, aq_error,
4376 		    hw->aq.asq_last_status);
4377 		return (EAGAIN);
4378 	}
4379 
4380 	pf->advertised_speed = requested_ls;
4381 	ixl_update_link_status(pf);
4382 	return (0);
4383 }
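
/*
** Example usage from userland (a sketch; the OID name is
** illustrative), advertising 1G and 10G but not 100Mb:
**
**	# sysctl dev.ixl.0.advertise_speed=0x6
*/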
4384 
4385 /*
4386 ** Get the width and transaction speed of
4387 ** the bus this adapter is plugged into.
4388 */
4389 static u16
4390 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4391 {
4392 	u16		link;
4393 	u32		offset;
4394 
4396 	/* Get the PCI Express Capabilities offset */
4397 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
4398 
4399 	/* ...and read the Link Status Register */
4400 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4401 
4402 	switch (link & I40E_PCI_LINK_WIDTH) {
4403 	case I40E_PCI_LINK_WIDTH_1:
4404 		hw->bus.width = i40e_bus_width_pcie_x1;
4405 		break;
4406 	case I40E_PCI_LINK_WIDTH_2:
4407 		hw->bus.width = i40e_bus_width_pcie_x2;
4408 		break;
4409 	case I40E_PCI_LINK_WIDTH_4:
4410 		hw->bus.width = i40e_bus_width_pcie_x4;
4411 		break;
4412 	case I40E_PCI_LINK_WIDTH_8:
4413 		hw->bus.width = i40e_bus_width_pcie_x8;
4414 		break;
4415 	default:
4416 		hw->bus.width = i40e_bus_width_unknown;
4417 		break;
4418 	}
4419 
4420 	switch (link & I40E_PCI_LINK_SPEED) {
4421 	case I40E_PCI_LINK_SPEED_2500:
4422 		hw->bus.speed = i40e_bus_speed_2500;
4423 		break;
4424 	case I40E_PCI_LINK_SPEED_5000:
4425 		hw->bus.speed = i40e_bus_speed_5000;
4426 		break;
4427 	case I40E_PCI_LINK_SPEED_8000:
4428 		hw->bus.speed = i40e_bus_speed_8000;
4429 		break;
4430 	default:
4431 		hw->bus.speed = i40e_bus_speed_unknown;
4432 		break;
4433 	}
4434 
4436 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4437 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4438 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4439 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4440 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4441 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4442 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4443 	    ("Unknown"));
4444 
4445 	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4446 	    (hw->bus.speed < i40e_bus_speed_8000)) {
4447 		device_printf(dev, "PCI-Express bandwidth available"
4448 		    " for this device\n     is not sufficient for"
4449 		    " normal operation.\n");
4450 		device_printf(dev, "For expected performance a x8 "
4451 		    "PCIE Gen3 slot is required.\n");
4452 	}
4453 
4454 	return (link);
4455 }
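
/*
** Decoding example (assuming the masks follow the standard PCIe
** Link Status register layout, speed in bits 3:0 and width in bits
** 9:4): a raw value of 0x0083 decodes to speed 8.0GT/s, width x8.
*/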
4456 
4457 #ifdef IXL_DEBUG
4458 static int
4459 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4460 {
4461 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4462 	struct i40e_hw *hw = &pf->hw;
4463 	struct i40e_link_status link_status;
4464 	char buf[512];
4465 
4466 	enum i40e_status_code aq_error = 0;
4467 
4468 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4469 	if (aq_error) {
4470 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4471 		return (EPERM);
4472 	}
4473 
4474 	snprintf(buf, sizeof(buf), "\n"
4475 	    "PHY Type : %#04x\n"
4476 	    "Speed    : %#04x\n"
4477 	    "Link info: %#04x\n"
4478 	    "AN info  : %#04x\n"
4479 	    "Ext info : %#04x",
4480 	    link_status.phy_type, link_status.link_speed,
4481 	    link_status.link_info, link_status.an_info,
4482 	    link_status.ext_info);
4483 
4484 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4485 }
4486 
4487 static int
4488 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4489 {
4490 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4491 	struct i40e_hw *hw = &pf->hw;
4492 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4493 	char buf[512];
4494 
4495 	enum i40e_status_code aq_error = 0;
4496 
4497 	// TODO: Print out list of qualified modules as well?
4498 	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4499 	if (aq_error) {
4500 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4501 		return (EPERM);
4502 	}
4503 
4504 	snprintf(buf, sizeof(buf), "\n"
4505 	    "PHY Type : %#010x\n"
4506 	    "Speed    : %#04x\n"
4507 	    "Abilities: %#04x\n"
4508 	    "EEE cap  : %#06x\n"
4509 	    "EEER reg : %#010x\n"
4510 	    "D3 Lpan  : %#04x",
4511 	    abilities_resp.phy_type, abilities_resp.link_speed,
4512 	    abilities_resp.abilities, abilities_resp.eee_capability,
4513 	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4514 
4515 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4516 }
4517 
4518 static int
4519 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4520 {
4521 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4522 	struct ixl_vsi *vsi = &pf->vsi;
4523 	struct ixl_mac_filter *f;
4524 	char *buf, *buf_i;
4525 
4526 	int error = 0;
4527 	int ftl_len = 0;
4528 	int ftl_counter = 0;
4529 	int buf_len = 0;
4530 	int entry_len = 42;
4531 
4532 	SLIST_FOREACH(f, &vsi->ftl, next) {
4533 		ftl_len++;
4534 	}
4535 
4536 	if (ftl_len < 1) {
4537 		sysctl_handle_string(oidp, "(none)", 6, req);
4538 		return (0);
4539 	}
4540 
4541 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4542 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
4543 
4544 	sprintf(buf_i++, "\n");
4545 	SLIST_FOREACH(f, &vsi->ftl, next) {
4546 		sprintf(buf_i,
4547 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4548 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4549 		buf_i += entry_len;
4550 		/* don't print '\n' for last entry */
4551 		if (++ftl_counter != ftl_len) {
4552 			sprintf(buf_i, "\n");
4553 			buf_i++;
4554 		}
4555 	}
4556 
4557 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4558 	if (error)
4559 		printf("sysctl error: %d\n", error);
4560 	free(buf, M_DEVBUF);
4561 	return error;
4562 }
4563 
4564 #define IXL_SW_RES_SIZE 0x14
4565 static int
4566 ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
4567 {
4568 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4569 	struct i40e_hw *hw = &pf->hw;
4570 	device_t dev = pf->dev;
4571 	struct sbuf *buf;
4572 	int error = 0;
4573 
4574 	u8 num_entries;
4575 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4576 
4577 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4578 	if (!buf) {
4579 		device_printf(dev, "Could not allocate sbuf for output.\n");
4580 		return (ENOMEM);
4581 	}
4582 
4583 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4584 				resp,
4585 				IXL_SW_RES_SIZE,
4586 				NULL);
4587 	if (error) {
4588 		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4589 		    __func__, error, hw->aq.asq_last_status);
4590 		sbuf_delete(buf);
4591 		return error;
4592 	}
4593 	device_printf(dev, "Num_entries: %d\n", num_entries);
4594 
4595 	sbuf_cat(buf, "\n");
4596 	sbuf_printf(buf,
4597 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4598 	    "     | (this)     | (all) | (this) | (all)       \n");
4599 	for (int i = 0; i < num_entries; i++) {
4600 		sbuf_printf(buf,
4601 		    "%#4x | %10d   %5d   %6d   %12d",
4602 		    resp[i].resource_type,
4603 		    resp[i].guaranteed,
4604 		    resp[i].total,
4605 		    resp[i].used,
4606 		    resp[i].total_unalloced);
4607 		if (i < num_entries - 1)
4608 			sbuf_cat(buf, "\n");
4609 	}
4610 
4611 	error = sbuf_finish(buf);
4612 	if (error) {
4613 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4614 		sbuf_delete(buf);
4615 		return error;
4616 	}
4617 
4618 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4619 	if (error)
4620 		device_printf(dev, "sysctl error: %d\n", error);
4621 	sbuf_delete(buf);
4622 	return error;
4623 
4624 }
4625 
4626 /*
4627 ** Dump TX desc given index.
4628 ** Doesn't work; don't use.
4629 ** TODO: Also needs a queue index input!
4630 */
4631 static int
4632 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4633 {
4634 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4635 	device_t dev = pf->dev;
4636 	struct sbuf *buf;
4637 	int error = 0;
4638 
4639 	int desc_idx = 0;	/* int, not u16: sysctl_handle_int() stores an int */
4640 
4641 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4642 	if (!buf) {
4643 		device_printf(dev, "Could not allocate sbuf for output.\n");
4644 		return (ENOMEM);
4645 	}
4646 
4647 	/* Read in index */
4648 	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4649 	if (error)
4650 		return (error);
4651 	if (req->newptr == NULL)
4652 		return (EIO); // fix
4653 	if (desc_idx > 1024) { // fix
4654 		device_printf(dev,
4655 		    "Invalid descriptor index, needs to be < 1024\n"); // fix
4656 		return (EINVAL);
4657 	}
4658 
4659 	// Don't use this sysctl yet
4660 	if (TRUE)
4661 		return (ENODEV);
4662 
4663 	sbuf_cat(buf, "\n");
4664 
4665 	// set to queue 1?
4666 	struct ixl_queue *que = pf->vsi.queues;
4667 	struct tx_ring *txr = &(que[1].txr);
4668 	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4669 
4670 	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4671 	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4672 	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4673 
4674 	error = sbuf_finish(buf);
4675 	if (error) {
4676 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4677 		sbuf_delete(buf);
4678 		return error;
4679 	}
4680 
4681 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4682 	if (error)
4683 		device_printf(dev, "sysctl error: %d\n", error);
4684 	sbuf_delete(buf);
4685 	return error;
4686 }
4687 #endif
4688 
4689