/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.2";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_setup_vsi(struct ixl_vsi *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");
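
/*
 * Usage note (illustrative, not compiled): the CTLFLAG_RDTUN knobs in this
 * file are boot-time tunables, so they are set from loader.conf(5) before
 * the module initializes, for example:
 *
 *	hw.ixl.enable_msix=0	# fall back to legacy/MSI for testing
 *	hw.ixl.max_queues=4	# cap the queue count by hand
 *
 * The effective values can be read back at runtime with "sysctl hw.ixl".
 */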

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on the cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
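
/*
 * A sketch of how these knobs interact (based on the code in this file,
 * not a hardware spec): with dynamic_rx_itr/dynamic_tx_itr nonzero,
 * ixl_set_queue_rx_itr() and ixl_set_queue_tx_itr() re-tune each queue's
 * throttling rate from observed traffic; otherwise the static
 * rx_itr/tx_itr values (IXL_ITR_8K/IXL_ITR_4K by default) are programmed
 * once by ixl_configure_itr() at init time.
 */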

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};


/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on the PCI vendor/device ID of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	u16		bus;
	int		error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* For now always do an initial CORE reset on first device */
	{
		static int	ixl_dev_count;
		static int	ixl_dev_track[32];
		u32		my_dev;
		int		i, found = FALSE;
		u16		bus = pci_get_bus(dev);

		mtx_lock(&ixl_reset_mtx);
		my_dev = (bus << 8) | hw->bus.device;
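		/*
		 * Illustrative example: bus 0x03, slot 0x00 yields the key
		 * 0x0300, so all PFs (functions) on one physical adapter
		 * share an entry and the CORE reset below runs only once
		 * per adapter, not once per port.
		 */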

		for (i = 0; i < ixl_dev_count; i++) {
			if (ixl_dev_track[i] == my_dev)
				found = TRUE;
		}

		if (!found) {
			u32 reg;

			ixl_dev_track[ixl_dev_count] = my_dev;
			ixl_dev_count++;

			INIT_DEBUGOUT("Initial CORE RESET\n");
			wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
			ixl_flush(hw);
			i = 50;
			do {
				i40e_msec_delay(50);
				reg = rd32(hw, I40E_GLGEN_RSTAT);
				if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
					break;
			} while (i--);

			/* paranoia */
			wr32(hw, I40E_PF_ATQLEN, 0);
			wr32(hw, I40E_PF_ATQBAL, 0);
			wr32(hw, I40E_PF_ATQBAH, 0);
			i40e_clear_pxe_mode(hw);
		}
		mtx_unlock(&ixl_reset_mtx);
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	i40e_msec_delay(75);
	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
	if (error) {
		device_printf(dev, "link restart failed, aq_err=%d\n",
		    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0)
		goto err_late;

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	ixl_free_vsi(vsi);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
	ixl_free_pci_resources(pf);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	ether_ifdetach(vsi->ifp);
	callout_drain(&pf->timer);

	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	int		error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
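
/*
 * Worked example of the logic above (illustrative): with TXCSUM and TSO4
 * both enabled, "ifconfig ixl0 -txcsum" puts IFCAP_TXCSUM in the mask, so
 * both capabilities are cleared and IXL_FLAGS_KEEP_TSO4 is latched; a later
 * "ifconfig ixl0 txcsum" then restores TSO4 along with TXCSUM. The IPv6
 * branch mirrors this, preserving the invariant that TSO never runs
 * without TX checksum offload.
 */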

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: it is the if_init entry point
 *  in the network interface structure, and the driver also calls it
 *  internally as a hw/sw initialization routine to reach a
 *  consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LAA address change failed!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Setup the VSI */
	ixl_setup_vsi(vsi);

	/*
	** Prepare the rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlans if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif
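
/*
 * Call-site sketch (assumed layout; the actual caller lives in the TX
 * offload path in ixl_txrx.c): once a transmitted frame has been parsed
 * down to a TCP header, something like the following hands it to ATR:
 *
 *	if (etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)
 *		ixl_atr(que, th, etype);
 *
 * The real caller also verifies that the L4 protocol is TCP before
 * passing the header pointer.
 */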


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to IXL_MAX_TX_BUSY we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
1714 /*
1715 ** Note: this routine updates the OS on the link state
1716 **	the real check of the hardware only happens with
1717 **	a link interrupt.
1718 */
1719 static void
1720 ixl_update_link_status(struct ixl_pf *pf)
1721 {
1722 	struct ixl_vsi		*vsi = &pf->vsi;
1723 	struct i40e_hw		*hw = &pf->hw;
1724 	struct ifnet		*ifp = vsi->ifp;
1725 	device_t		dev = pf->dev;
1726 	enum i40e_fc_mode 	fc;
1727 
1728 
1729 	if (vsi->link_up){
1730 		if (vsi->link_active == FALSE) {
1731 			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1732 			if (bootverbose) {
1733 				fc = hw->fc.current_mode;
1734 				device_printf(dev,"Link is up %d Gbps %s,"
1735 				    " Flow Control: %s\n",
1736 				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1737 				    "Full Duplex", ixl_fc_string[fc]);
1738 			}
1739 			vsi->link_active = TRUE;
1740 			if_link_state_change(ifp, LINK_STATE_UP);
1741 		}
1742 	} else { /* Link down */
1743 		if (vsi->link_active == TRUE) {
1744 			if (bootverbose)
1745 				device_printf(dev,"Link is Down\n");
1746 			if_link_state_change(ifp, LINK_STATE_DOWN);
1747 			vsi->link_active = FALSE;
1748 		}
1749 	}
1750 
1751 	return;
1752 }
1753 
1754 /*********************************************************************
1755  *
1756  *  This routine disables all traffic on the adapter by disabling
1757  *  interrupts and the RX/TX rings, then marks the interface down.
1758  *
1759  **********************************************************************/
1760 
1761 static void
1762 ixl_stop(struct ixl_pf *pf)
1763 {
1764 	struct ixl_vsi	*vsi = &pf->vsi;
1765 	struct ifnet	*ifp = vsi->ifp;
1766 
1767 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1768 
1769 	INIT_DEBUGOUT("ixl_stop: begin\n");
1770 	ixl_disable_intr(vsi);
1771 	ixl_disable_rings(vsi);
1772 
1773 	/* Tell the stack that the interface is no longer active */
1774 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1775 
1776 	/* Stop the local timer */
1777 	callout_stop(&pf->timer);
1778 
1779 	return;
1780 }
1781 
1782 
1783 /*********************************************************************
1784  *
1785  *  Setup the Legacy or MSI Interrupt handler for the VSI
1786  *
1787  **********************************************************************/
1788 static int
1789 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1790 {
1791 	device_t        dev = pf->dev;
1792 	struct 		ixl_vsi *vsi = &pf->vsi;
1793 	struct		ixl_queue *que = vsi->queues;
1794 	int 		error, rid = 0;
1795 
1796 	if (pf->msix == 1)
1797 		rid = 1;
1798 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1799 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1800 	if (pf->res == NULL) {
1801 		device_printf(dev,"Unable to allocate"
1802 		    " bus resource: vsi legacy/msi interrupt\n");
1803 		return (ENXIO);
1804 	}
1805 
1806 	/* Set the handler function */
1807 	error = bus_setup_intr(dev, pf->res,
1808 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1809 	    ixl_intr, pf, &pf->tag);
1810 	if (error) {
1811 		pf->res = NULL;
1812 		device_printf(dev, "Failed to register legacy/msi handler");
1813 		return (error);
1814 	}
1815 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1816 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1817 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1818 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1819 	    taskqueue_thread_enqueue, &que->tq);
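	/*
	** Note: &que->tq is passed as the enqueue context before
	** que->tq itself is assigned; this is the standard
	** taskqueue_thread_enqueue idiom, which dereferences the
	** pointer-to-pointer at enqueue time rather than now.
	*/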
1820 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1821 	    device_get_nameunit(dev));
1822 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1823 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1824 	    taskqueue_thread_enqueue, &pf->tq);
1825 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1826 	    device_get_nameunit(dev));
1827 
1828 	return (0);
1829 }
1830 
1831 
1832 /*********************************************************************
1833  *
1834  *  Setup MSIX Interrupt resources and handlers for the VSI
1835  *
1836  **********************************************************************/
1837 static int
1838 ixl_assign_vsi_msix(struct ixl_pf *pf)
1839 {
1840 	device_t	dev = pf->dev;
1841 	struct 		ixl_vsi *vsi = &pf->vsi;
1842 	struct 		ixl_queue *que = vsi->queues;
1843 	struct		tx_ring	 *txr;
1844 	int 		error, rid, vector = 0;
1845 
1846 	/* Admin Queue is vector 0 */
1847 	rid = vector + 1;
1848 	pf->res = bus_alloc_resource_any(dev,
1849     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1850 	if (!pf->res) {
1851 		device_printf(dev,"Unable to allocate"
1852     	    " bus resource: Adminq interrupt [%d]\n", rid);
1853 		return (ENXIO);
1854 	}
1855 	/* Set the adminq vector and handler */
1856 	error = bus_setup_intr(dev, pf->res,
1857 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1858 	    ixl_msix_adminq, pf, &pf->tag);
1859 	if (error) {
1860 		pf->res = NULL;
1861 		device_printf(dev, "Failed to register Admin que handler");
1862 		return (error);
1863 	}
1864 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1865 	pf->admvec = vector;
1866 	/* Tasklet for Admin Queue */
1867 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1868 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1869 	    taskqueue_thread_enqueue, &pf->tq);
1870 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1871 	    device_get_nameunit(pf->dev));
1872 	++vector;
1873 
1874 	/* Now set up the stations */
1875 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1876 		rid = vector + 1;
1877 		txr = &que->txr;
1878 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1879 		    RF_SHAREABLE | RF_ACTIVE);
1880 		if (que->res == NULL) {
1881 			device_printf(dev,"Unable to allocate"
1882 		    	    " bus resource: que interrupt [%d]\n", vector);
1883 			return (ENXIO);
1884 		}
1885 		/* Set the handler function */
1886 		error = bus_setup_intr(dev, que->res,
1887 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1888 		    ixl_msix_que, que, &que->tag);
1889 		if (error) {
1890 			que->res = NULL;
1891 			device_printf(dev, "Failed to register que handler");
1892 			return (error);
1893 		}
1894 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1895 		/* Bind the vector to a CPU */
1896 		bus_bind_intr(dev, que->res, i);
1897 		que->msix = vector;
1898 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1899 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1900 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1901 		    taskqueue_thread_enqueue, &que->tq);
1902 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1903 		    device_get_nameunit(pf->dev));
1904 	}
1905 
1906 	return (0);
1907 }
1908 
1909 
1910 /*
1911  * Allocate MSI/X vectors
1912  */
1913 static int
1914 ixl_init_msix(struct ixl_pf *pf)
1915 {
1916 	device_t dev = pf->dev;
1917 	int rid, want, vectors, queues, available;
1918 
1919 	/* Override by tuneable */
1920 	if (ixl_enable_msix == 0)
1921 		goto msi;
1922 
1923 	/*
1924 	** When used in a virtualized environment,
1925 	** the PCI BUSMASTER capability may not be set,
1926 	** so explicitly set it here and rewrite
1927 	** the ENABLE in the MSIX control register
1928 	** at this point to cause the host to
1929 	** successfully initialize us.
1930 	*/
1931 	{
1932 		u16 pci_cmd_word;
1933 		int msix_ctrl;
1934 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1935 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1936 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1937 		pci_find_cap(dev, PCIY_MSIX, &rid);
1938 		rid += PCIR_MSIX_CTRL;
1939 		msix_ctrl = pci_read_config(dev, rid, 2);
1940 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1941 		pci_write_config(dev, rid, msix_ctrl, 2);
1942 	}
1943 
1944 	/* First try MSI/X */
1945 	rid = PCIR_BAR(IXL_BAR);
1946 	pf->msix_mem = bus_alloc_resource_any(dev,
1947 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1948        	if (!pf->msix_mem) {
1949 		/* May not be enabled */
1950 		device_printf(pf->dev,
1951 		    "Unable to map MSIX table \n");
1952 		goto msi;
1953 	}
1954 
1955 	available = pci_msix_count(dev);
1956 	if (available == 0) { /* system has msix disabled */
1957 		bus_release_resource(dev, SYS_RES_MEMORY,
1958 		    rid, pf->msix_mem);
1959 		pf->msix_mem = NULL;
1960 		goto msi;
1961 	}
1962 
1963 	/* Figure out a reasonable auto config value */
1964 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1965 
1966 	/* Override with hardcoded value if sane */
1967 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1968 		queues = ixl_max_queues;
1969 
1970 	/*
1971 	** Want one vector (RX/TX pair) per queue
1972 	** plus an additional for the admin queue.
1973 	*/
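	/*
	** Worked example: with 8 CPUs and 16 vectors reported by
	** the device, queues = 8 and want = 9, which fits. With
	** only 4 vectors reported, queues = min(8, 3) = 3 and
	** want = 4, which also fits. Only when want exceeds what
	** is available do we fall back to legacy setup below.
	*/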
1974 	want = queues + 1;
1975 	if (want <= available)	/* Have enough */
1976 		vectors = want;
1977 	else {
1978                	device_printf(pf->dev,
1979 		    "MSIX Configuration Problem, "
1980 		    "%d vectors available but %d wanted!\n",
1981 		    available, want);
1982 		return (0); /* Will go to Legacy setup */
1983 	}
1984 
1985 	if (pci_alloc_msix(dev, &vectors) == 0) {
1986                	device_printf(pf->dev,
1987 		    "Using MSIX interrupts with %d vectors\n", vectors);
1988 		pf->msix = vectors;
1989 		pf->vsi.num_queues = queues;
1990 		return (vectors);
1991 	}
1992 msi:
1993        	vectors = pci_msi_count(dev);
1994 	pf->vsi.num_queues = 1;
1995 	pf->msix = 1;
1996 	ixl_max_queues = 1;
1997 	ixl_enable_msix = 0;
1998        	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
1999                	device_printf(pf->dev,"Using an MSI interrupt\n");
2000 	else {
2001 		pf->msix = 0;
2002                	device_printf(pf->dev,"Using a Legacy interrupt\n");
2003 	}
2004 	return (vectors);
2005 }
2006 
2007 
2008 /*
2009  * Plumb MSI/X vectors
2010  */
2011 static void
2012 ixl_configure_msix(struct ixl_pf *pf)
2013 {
2014 	struct i40e_hw	*hw = &pf->hw;
2015 	struct ixl_vsi *vsi = &pf->vsi;
2016 	u32		reg;
2017 	u16		vector = 1;
2018 
2019 	/* First set up the adminq - vector 0 */
2020 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2021 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2022 
2023 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2024 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2025 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2026 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2027 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2028 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2029 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2030 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2031 
2032 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2033 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2034 
2035 	wr32(hw, I40E_PFINT_DYN_CTL0,
2036 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2037 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2038 
2039 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2040 
2041 	/* Next configure the queues */
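	/*
	** Sketch of the cause list as programmed below: each queue
	** pair gets its own vector starting at 1; LNKLSTN(i) starts
	** the list at RX queue i, RQCTL(i) chains to TX queue i, and
	** TQCTL(i) names RX queue i + 1 next, with the last TX cause
	** terminated by IXL_QUEUE_EOL. For two queues the next
	** pointers read: rxq0 -> txq0 -> rxq1 -> txq1 -> EOL.
	*/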
2042 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2043 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2044 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2045 
2046 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2047 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2048 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2049 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2050 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2051 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2052 
2053 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2054 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2055 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2056 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2057 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2058 		if (i == (vsi->num_queues - 1))
2059 			reg |= (IXL_QUEUE_EOL
2060 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2061 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2062 	}
2063 }
2064 
2065 /*
2066  * Configure for MSI single vector operation
2067  */
2068 static void
2069 ixl_configure_legacy(struct ixl_pf *pf)
2070 {
2071 	struct i40e_hw	*hw = &pf->hw;
2072 	u32		reg;
2073 
2074 
2075 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2076 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2077 
2078 
2079 	/* Setup "other" causes */
2080 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2081 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2082 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2083 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2084 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2085 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2086 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2087 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2088 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2089 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2090 	    ;
2091 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2092 
2093 	/* SW_ITR_IDX = 0, but don't change INTENA */
2094 	wr32(hw, I40E_PFINT_DYN_CTL0,
2095 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2096 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2097 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2098 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2099 
2100 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2101 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2102 
2103 	/* Associate the queue pair to the vector and enable the q int */
2104 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2105 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2106 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2107 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2108 
2109 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2110 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2111 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2112 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2113 
2114 	/* Next enable the queue pair */
2115 	reg = rd32(hw, I40E_QTX_ENA(0));
2116 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2117 	wr32(hw, I40E_QTX_ENA(0), reg);
2118 
2119 	reg = rd32(hw, I40E_QRX_ENA(0));
2120 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2121 	wr32(hw, I40E_QRX_ENA(0), reg);
2122 }
2123 
2124 
2125 /*
2126  * Set the Initial ITR state
2127  */
2128 static void
2129 ixl_configure_itr(struct ixl_pf *pf)
2130 {
2131 	struct i40e_hw		*hw = &pf->hw;
2132 	struct ixl_vsi		*vsi = &pf->vsi;
2133 	struct ixl_queue	*que = vsi->queues;
2134 
2135 	vsi->rx_itr_setting = ixl_rx_itr;
2136 	if (ixl_dynamic_rx_itr)
2137 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2138 	vsi->tx_itr_setting = ixl_tx_itr;
2139 	if (ixl_dynamic_tx_itr)
2140 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2141 
2142 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2143 		struct tx_ring	*txr = &que->txr;
2144 		struct rx_ring 	*rxr = &que->rxr;
2145 
2146 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2147 		    vsi->rx_itr_setting);
2148 		rxr->itr = vsi->rx_itr_setting;
2149 		rxr->latency = IXL_AVE_LATENCY;
2150 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2151 		    vsi->tx_itr_setting);
2152 		txr->itr = vsi->tx_itr_setting;
2153 		txr->latency = IXL_AVE_LATENCY;
2154 	}
2155 }
2156 
2157 
2158 static int
2159 ixl_allocate_pci_resources(struct ixl_pf *pf)
2160 {
2161 	int             rid;
2162 	device_t        dev = pf->dev;
2163 
2164 	rid = PCIR_BAR(0);
2165 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2166 	    &rid, RF_ACTIVE);
2167 
2168 	if (!(pf->pci_mem)) {
2169 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2170 		return (ENXIO);
2171 	}
2172 
2173 	pf->osdep.mem_bus_space_tag =
2174 		rman_get_bustag(pf->pci_mem);
2175 	pf->osdep.mem_bus_space_handle =
2176 		rman_get_bushandle(pf->pci_mem);
2177 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2178 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2179 
2180 	pf->hw.back = &pf->osdep;
2181 
2182 	/*
2183 	** Now set up MSI or MSI/X; this should
2184 	** return the number of supported
2185 	** vectors (1 for MSI).
2186 	*/
2187 	pf->msix = ixl_init_msix(pf);
2188 	return (0);
2189 }
2190 
2191 static void
2192 ixl_free_pci_resources(struct ixl_pf * pf)
2193 {
2194 	struct ixl_vsi		*vsi = &pf->vsi;
2195 	struct ixl_queue	*que = vsi->queues;
2196 	device_t		dev = pf->dev;
2197 	int			rid, memrid;
2198 
2199 	memrid = PCIR_BAR(IXL_BAR);
2200 
2201 	/* We may get here before stations are set up */
2202 	if ((!ixl_enable_msix) || (que == NULL))
2203 		goto early;
2204 
2205 	/*
2206 	**  Release all msix VSI resources:
2207 	*/
2208 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2209 		rid = que->msix + 1;
2210 		if (que->tag != NULL) {
2211 			bus_teardown_intr(dev, que->res, que->tag);
2212 			que->tag = NULL;
2213 		}
2214 		if (que->res != NULL)
2215 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2216 	}
2217 
2218 early:
2219 	/* Clean the AdminQ interrupt last */
2220 	if (pf->admvec) /* we are doing MSIX */
2221 		rid = pf->admvec + 1;
2222 	else
2223 		rid = (pf->msix != 0) ? 1 : 0;
2224 
2225 	if (pf->tag != NULL) {
2226 		bus_teardown_intr(dev, pf->res, pf->tag);
2227 		pf->tag = NULL;
2228 	}
2229 	if (pf->res != NULL)
2230 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2231 
2232 	if (pf->msix)
2233 		pci_release_msi(dev);
2234 
2235 	if (pf->msix_mem != NULL)
2236 		bus_release_resource(dev, SYS_RES_MEMORY,
2237 		    memrid, pf->msix_mem);
2238 
2239 	if (pf->pci_mem != NULL)
2240 		bus_release_resource(dev, SYS_RES_MEMORY,
2241 		    PCIR_BAR(0), pf->pci_mem);
2242 
2243 	return;
2244 }
2245 
2246 
2247 /*********************************************************************
2248  *
2249  *  Setup networking device structure and register an interface.
2250  *
2251  **********************************************************************/
2252 static int
2253 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2254 {
2255 	struct ifnet		*ifp;
2256 	struct i40e_hw		*hw = vsi->hw;
2257 	struct ixl_queue	*que = vsi->queues;
2258 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2259 	enum i40e_status_code aq_error = 0;
2260 
2261 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2262 
2263 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2264 	if (ifp == NULL) {
2265 		device_printf(dev, "can not allocate ifnet structure\n");
2266 		return (-1);
2267 	}
2268 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2269 	ifp->if_mtu = ETHERMTU;
2270 	ifp->if_baudrate = 4000000000;  // ??
2271 	ifp->if_init = ixl_init;
2272 	ifp->if_softc = vsi;
2273 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2274 	ifp->if_ioctl = ixl_ioctl;
2275 
2276 	ifp->if_transmit = ixl_mq_start;
2277 
2278 	ifp->if_qflush = ixl_qflush;
2279 
2280 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2281 
2282 	ether_ifattach(ifp, hw->mac.addr);
2283 
2284 	vsi->max_frame_size =
2285 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2286 	    + ETHER_VLAN_ENCAP_LEN;
2287 
2288 	/*
2289 	 * Tell the upper layer(s) we support long frames.
2290 	 */
2291 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2292 
2293 	ifp->if_capabilities |= IFCAP_HWCSUM;
2294 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2295 	ifp->if_capabilities |= IFCAP_TSO;
2296 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2297 	ifp->if_capabilities |= IFCAP_LRO;
2298 
2299 	/* VLAN capabilities */
2300 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2301 			     |  IFCAP_VLAN_HWTSO
2302 			     |  IFCAP_VLAN_MTU
2303 			     |  IFCAP_VLAN_HWCSUM;
2304 	ifp->if_capenable = ifp->if_capabilities;
2305 
2306 	/*
2307 	** Don't turn this on by default: if vlans are
2308 	** created on another pseudo device (e.g. lagg),
2309 	** vlan events are not passed through, breaking
2310 	** operation, whereas with HW FILTER off it works.
2311 	** If using vlans directly on the ixl driver you can
2312 	** enable this and get full hardware tag filtering.
2313 	*/
2314 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2315 
2316 	/*
2317 	 * Specify the media types supported by this adapter and register
2318 	 * callbacks to update media and link information
2319 	 */
2320 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2321 		     ixl_media_status);
2322 
2323 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2324 	if (aq_error) {
2325 		printf("Error getting supported media types, AQ error %d\n", aq_error);
2326 		return (EPERM);
2327 	}
2328 
2329 	/* Add supported media types */
2330 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2331 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2332 
2333 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2334 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2335 
2336 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2337 	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2338 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2339 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2340 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2341 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2342 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2343 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2344 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2345 
2346 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2347 	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2348 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2349 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2350 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2351 	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2352 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2353 
2354 	/* Use autoselect media by default */
2355 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2356 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2357 
2358 	return (0);
2359 }
2360 
2361 static bool
2362 ixl_config_link(struct i40e_hw *hw)
2363 {
2364 	bool check;
2365 
2366 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2367 	check = i40e_get_link_status(hw);
2368 #ifdef IXL_DEBUG
2369 	printf("Link is %s\n", check ? "up":"down");
2370 #endif
2371 	return (check);
2372 }
2373 
2374 /*********************************************************************
2375  *
2376  *  Initialize this VSI
2377  *
2378  **********************************************************************/
2379 static int
2380 ixl_setup_vsi(struct ixl_vsi *vsi)
2381 {
2382 	struct i40e_hw	*hw = vsi->hw;
2383 	device_t 	dev = vsi->dev;
2384 	struct i40e_aqc_get_switch_config_resp *sw_config;
2385 	struct i40e_vsi_context	ctxt;
2386 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2387 	int	ret = I40E_SUCCESS;
2388 	u16	next = 0;
2389 
2390 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2391 	ret = i40e_aq_get_switch_config(hw, sw_config,
2392 	    sizeof(aq_buf), &next, NULL);
2393 	if (ret) {
2394 		device_printf(dev,"aq_get_switch_config failed!!\n");
2395 		return (ret);
2396 	}
2397 #ifdef IXL_DEBUG
2398 	printf("Switch config: header reported: %d in structure, %d total\n",
2399     	    sw_config->header.num_reported, sw_config->header.num_total);
2400 	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2401 	    sw_config->element[0].element_type,
2402 	    sw_config->element[0].seid,
2403 	    sw_config->element[0].uplink_seid,
2404 	    sw_config->element[0].downlink_seid);
2405 #endif
2406 	/* Save off this important value */
2407 	vsi->seid = sw_config->element[0].seid;
2408 
2409 	memset(&ctxt, 0, sizeof(ctxt));
2410 	ctxt.seid = vsi->seid;
2411 	ctxt.pf_num = hw->pf_id;
2412 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2413 	if (ret) {
2414 		device_printf(dev,"get vsi params failed %x!!\n", ret);
2415 		return (ret);
2416 	}
2417 #ifdef IXL_DEBUG
2418 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2419 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2420 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2421 	    ctxt.uplink_seid, ctxt.vsi_number,
2422 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2423 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2424 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2425 #endif
2426 	/*
2427 	** Set the queue and traffic class bits
2428 	**  - when multiple traffic classes are supported
2429 	**    this will need to be more robust.
2430 	*/
2431 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2432 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2433 	ctxt.info.queue_mapping[0] = 0;
2434 	ctxt.info.tc_mapping[0] = 0x0800;
2435 
2436 	/* Set VLAN receive stripping mode */
2437 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2438 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2439 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2440 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2441 	else
2442 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2443 
2444 	/* Keep copy of VSI info in VSI for statistic counters */
2445 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2446 
2447 	/* Reset VSI statistics */
2448 	ixl_vsi_reset_stats(vsi);
2449 	vsi->hw_filters_add = 0;
2450 	vsi->hw_filters_del = 0;
2451 
2452 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2453 	if (ret)
2454 		device_printf(dev,"update vsi params failed %x!!\n",
2455 		   hw->aq.asq_last_status);
2456 	return (ret);
2457 }
2458 
2459 
2460 /*********************************************************************
2461  *
2462  *  Initialize the VSI:  this handles contexts, which means things
2463  *			 like the number of descriptors and buffer size;
2464  *			 the rings are also initialized through this function.
2465  *
2466  **********************************************************************/
2467 static int
2468 ixl_initialize_vsi(struct ixl_vsi *vsi)
2469 {
2470 	struct ixl_queue	*que = vsi->queues;
2471 	device_t		dev = vsi->dev;
2472 	struct i40e_hw		*hw = vsi->hw;
2473 	int			err = 0;
2474 
2475 
2476 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2477 		struct tx_ring		*txr = &que->txr;
2478 		struct rx_ring 		*rxr = &que->rxr;
2479 		struct i40e_hmc_obj_txq tctx;
2480 		struct i40e_hmc_obj_rxq rctx;
2481 		u32			txctl;
2482 		u16			size;
2483 
2484 
2485 		/* Setup the HMC TX Context  */
2486 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2487 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2488 		tctx.new_context = 1;
2489 		tctx.base = (txr->dma.pa/128);
2490 		tctx.qlen = que->num_desc;
2491 		tctx.fc_ena = 0;
2492 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2493 		/* Enable HEAD writeback */
2494 		tctx.head_wb_ena = 1;
2495 		tctx.head_wb_addr = txr->dma.pa +
2496 		    (que->num_desc * sizeof(struct i40e_tx_desc));
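		/*
		** With head writeback enabled the hardware DMAs its
		** descriptor head index into the u32 placed just past
		** the ring, which is why ixl_setup_stations() sizes
		** the TX DMA area as num_desc * sizeof(desc) +
		** sizeof(u32).
		*/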
2497 		tctx.rdylist_act = 0;
2498 		err = i40e_clear_lan_tx_queue_context(hw, i);
2499 		if (err) {
2500 			device_printf(dev, "Unable to clear TX context\n");
2501 			break;
2502 		}
2503 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2504 		if (err) {
2505 			device_printf(dev, "Unable to set TX context\n");
2506 			break;
2507 		}
2508 		/* Associate the ring with this PF */
2509 		txctl = I40E_QTX_CTL_PF_QUEUE;
2510 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2511 		    I40E_QTX_CTL_PF_INDX_MASK);
2512 		wr32(hw, I40E_QTX_CTL(i), txctl);
2513 		ixl_flush(hw);
2514 
2515 		/* Do ring (re)init */
2516 		ixl_init_tx_ring(que);
2517 
2518 		/* Next setup the HMC RX Context  */
2519 		if (vsi->max_frame_size <= 2048)
2520 			rxr->mbuf_sz = MCLBYTES;
2521 		else
2522 			rxr->mbuf_sz = MJUMPAGESIZE;
2523 
2524 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
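		/*
		** Example: at the default 1500 MTU, max_frame_size is
		** 1522 (MTU + Ethernet header + CRC + VLAN tag), well
		** under the chained-buffer ceiling (2048 * 5 = 10240,
		** assuming the hardware reports a chain length of 5),
		** so rxmax below is simply 1522.
		*/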
2525 
2526 		/* Set up an RX context for the HMC */
2527 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2528 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2529 		/* ignore header split for now */
2530 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2531 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2532 		    vsi->max_frame_size : max_rxmax;
2533 		rctx.dtype = 0;
2534 		rctx.dsize = 1;	/* do 32byte descriptors */
2535 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2536 		rctx.base = (rxr->dma.pa/128);
2537 		rctx.qlen = que->num_desc;
2538 		rctx.tphrdesc_ena = 1;
2539 		rctx.tphwdesc_ena = 1;
2540 		rctx.tphdata_ena = 0;
2541 		rctx.tphhead_ena = 0;
2542 		rctx.lrxqthresh = 2;
2543 		rctx.crcstrip = 1;
2544 		rctx.l2tsel = 1;
2545 		rctx.showiv = 1;
2546 		rctx.fc_ena = 0;
2547 		rctx.prefena = 1;
2548 
2549 		err = i40e_clear_lan_rx_queue_context(hw, i);
2550 		if (err) {
2551 			device_printf(dev,
2552 			    "Unable to clear RX context %d\n", i);
2553 			break;
2554 		}
2555 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2556 		if (err) {
2557 			device_printf(dev, "Unable to set RX context %d\n", i);
2558 			break;
2559 		}
2560 		err = ixl_init_rx_ring(que);
2561 		if (err) {
2562 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2563 			break;
2564 		}
2565 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2566 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2567 	}
2568 	return (err);
2569 }
2570 
2571 
2572 /*********************************************************************
2573  *
2574  *  Free all VSI structs.
2575  *
2576  **********************************************************************/
2577 void
2578 ixl_free_vsi(struct ixl_vsi *vsi)
2579 {
2580 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2581 	struct ixl_queue	*que = vsi->queues;
2582 	struct ixl_mac_filter *f;
2583 
2584 	/* Free station queues */
2585 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2586 		struct tx_ring *txr = &que->txr;
2587 		struct rx_ring *rxr = &que->rxr;
2588 
2589 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2590 			continue;
2591 		IXL_TX_LOCK(txr);
2592 		ixl_free_que_tx(que);
2593 		if (txr->base)
2594 			i40e_free_dma(&pf->hw, &txr->dma);
2595 		IXL_TX_UNLOCK(txr);
2596 		IXL_TX_LOCK_DESTROY(txr);
2597 
2598 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2599 			continue;
2600 		IXL_RX_LOCK(rxr);
2601 		ixl_free_que_rx(que);
2602 		if (rxr->base)
2603 			i40e_free_dma(&pf->hw, &rxr->dma);
2604 		IXL_RX_UNLOCK(rxr);
2605 		IXL_RX_LOCK_DESTROY(rxr);
2606 
2607 	}
2608 	free(vsi->queues, M_DEVBUF);
2609 
2610 	/* Free VSI filter list */
2611 	while (!SLIST_EMPTY(&vsi->ftl)) {
2612 		f = SLIST_FIRST(&vsi->ftl);
2613 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2614 		free(f, M_DEVBUF);
2615 	}
2616 }
2617 
2618 
2619 /*********************************************************************
2620  *
2621  *  Allocate memory for the VSI (virtual station interface) and its
2622  *  associated queues, rings and the descriptors associated with each,
2623  *  called only once at attach.
2624  *
2625  **********************************************************************/
2626 static int
2627 ixl_setup_stations(struct ixl_pf *pf)
2628 {
2629 	device_t		dev = pf->dev;
2630 	struct ixl_vsi		*vsi;
2631 	struct ixl_queue	*que;
2632 	struct tx_ring		*txr;
2633 	struct rx_ring		*rxr;
2634 	int 			rsize, tsize;
2635 	int			error = I40E_SUCCESS;
2636 
2637 	vsi = &pf->vsi;
2638 	vsi->back = (void *)pf;
2639 	vsi->hw = &pf->hw;
2640 	vsi->id = 0;
2641 	vsi->num_vlans = 0;
2642 
2643 	/* Get memory for the station queues */
2644         if (!(vsi->queues =
2645             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2646             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2647                 device_printf(dev, "Unable to allocate queue memory\n");
2648                 error = ENOMEM;
2649                 goto early;
2650         }
2651 
2652 	for (int i = 0; i < vsi->num_queues; i++) {
2653 		que = &vsi->queues[i];
2654 		que->num_desc = ixl_ringsz;
2655 		que->me = i;
2656 		que->vsi = vsi;
2657 		/* mark the queue as active */
2658 		vsi->active_queues |= (u64)1 << que->me;
2659 		txr = &que->txr;
2660 		txr->que = que;
2661 		txr->tail = I40E_QTX_TAIL(que->me);
2662 
2663 		/* Initialize the TX lock */
2664 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2665 		    device_get_nameunit(dev), que->me);
2666 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2667 		/* Create the TX descriptor ring */
2668 		tsize = roundup2((que->num_desc *
2669 		    sizeof(struct i40e_tx_desc)) +
2670 		    sizeof(u32), DBA_ALIGN);
2671 		if (i40e_allocate_dma(&pf->hw,
2672 		    &txr->dma, tsize, DBA_ALIGN)) {
2673 			device_printf(dev,
2674 			    "Unable to allocate TX Descriptor memory\n");
2675 			error = ENOMEM;
2676 			goto fail;
2677 		}
2678 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2679 		bzero((void *)txr->base, tsize);
2680        		/* Now allocate transmit soft structs for the ring */
2681        		if (ixl_allocate_tx_data(que)) {
2682 			device_printf(dev,
2683 			    "Critical Failure setting up TX structures\n");
2684 			error = ENOMEM;
2685 			goto fail;
2686        		}
2687 		/* Allocate a buf ring */
2688 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2689 		    M_WAITOK, &txr->mtx);
2690 		if (txr->br == NULL) {
2691 			device_printf(dev,
2692 			    "Critical Failure setting up TX buf ring\n");
2693 			error = ENOMEM;
2694 			goto fail;
2695        		}
2696 
2697 		/*
2698 		 * Next the RX queues...
2699 		 */
2700 		rsize = roundup2(que->num_desc *
2701 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2702 		rxr = &que->rxr;
2703 		rxr->que = que;
2704 		rxr->tail = I40E_QRX_TAIL(que->me);
2705 
2706 		/* Initialize the RX side lock */
2707 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2708 		    device_get_nameunit(dev), que->me);
2709 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2710 
2711 		if (i40e_allocate_dma(&pf->hw,
2712 		    &rxr->dma, rsize, 4096)) {
2713 			device_printf(dev,
2714 			    "Unable to allocate RX Descriptor memory\n");
2715 			error = ENOMEM;
2716 			goto fail;
2717 		}
2718 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2719 		bzero((void *)rxr->base, rsize);
2720 
2721         	/* Allocate receive soft structs for the ring*/
2722 		if (ixl_allocate_rx_data(que)) {
2723 			device_printf(dev,
2724 			    "Critical Failure setting up receive structs\n");
2725 			error = ENOMEM;
2726 			goto fail;
2727 		}
2728 	}
2729 
2730 	return (0);
2731 
2732 fail:
2733 	for (int i = 0; i < vsi->num_queues; i++) {
2734 		que = &vsi->queues[i];
2735 		rxr = &que->rxr;
2736 		txr = &que->txr;
2737 		if (rxr->base)
2738 			i40e_free_dma(&pf->hw, &rxr->dma);
2739 		if (txr->base)
2740 			i40e_free_dma(&pf->hw, &txr->dma);
2741 	}
2742 
2743 early:
2744 	return (error);
2745 }
2746 
2747 /*
2748 ** Provide an update to the queue RX
2749 ** interrupt moderation value.
2750 */
2751 static void
2752 ixl_set_queue_rx_itr(struct ixl_queue *que)
2753 {
2754 	struct ixl_vsi	*vsi = que->vsi;
2755 	struct i40e_hw	*hw = vsi->hw;
2756 	struct rx_ring	*rxr = &que->rxr;
2757 	u16		rx_itr;
2758 	u16		rx_latency = 0;
2759 	int		rx_bytes;
2760 
2761 
2762 	/* Idle, do nothing */
2763 	if (rxr->bytes == 0)
2764 		return;
2765 
2766 	if (ixl_dynamic_rx_itr) {
2767 		rx_bytes = rxr->bytes/rxr->itr;
2768 		rx_itr = rxr->itr;
2769 
2770 		/* Adjust latency range */
2771 		switch (rxr->latency) {
2772 		case IXL_LOW_LATENCY:
2773 			if (rx_bytes > 10) {
2774 				rx_latency = IXL_AVE_LATENCY;
2775 				rx_itr = IXL_ITR_20K;
2776 			}
2777 			break;
2778 		case IXL_AVE_LATENCY:
2779 			if (rx_bytes > 20) {
2780 				rx_latency = IXL_BULK_LATENCY;
2781 				rx_itr = IXL_ITR_8K;
2782 			} else if (rx_bytes <= 10) {
2783 				rx_latency = IXL_LOW_LATENCY;
2784 				rx_itr = IXL_ITR_100K;
2785 			}
2786 			break;
2787 		case IXL_BULK_LATENCY:
2788 			if (rx_bytes <= 20) {
2789 				rx_latency = IXL_AVE_LATENCY;
2790 				rx_itr = IXL_ITR_20K;
2791 			}
2792 			break;
2793 		}
2794 
2795 		rxr->latency = rx_latency;
2796 
2797 		if (rx_itr != rxr->itr) {
2798 			/* do an exponential smoothing */
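			/*
			** e.g. old itr 40, measured target 8:
			** (10 * 8 * 40) / ((9 * 8) + 40) = 3200 / 112
			** = 28, most of the way to the target but
			** damped by the old setting.
			*/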
2799 			rx_itr = (10 * rx_itr * rxr->itr) /
2800 			    ((9 * rx_itr) + rxr->itr);
2801 			rxr->itr = rx_itr & IXL_MAX_ITR;
2802 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2803 			    que->me), rxr->itr);
2804 		}
2805 	} else { /* We may have toggled to non-dynamic */
2806 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2807 			vsi->rx_itr_setting = ixl_rx_itr;
2808 		/* Update the hardware if needed */
2809 		if (rxr->itr != vsi->rx_itr_setting) {
2810 			rxr->itr = vsi->rx_itr_setting;
2811 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2812 			    que->me), rxr->itr);
2813 		}
2814 	}
2815 	rxr->bytes = 0;
2816 	rxr->packets = 0;
2817 	return;
2818 }
2819 
2820 
2821 /*
2822 ** Provide an update to the queue TX
2823 ** interrupt moderation value.
2824 */
2825 static void
2826 ixl_set_queue_tx_itr(struct ixl_queue *que)
2827 {
2828 	struct ixl_vsi	*vsi = que->vsi;
2829 	struct i40e_hw	*hw = vsi->hw;
2830 	struct tx_ring	*txr = &que->txr;
2831 	u16		tx_itr;
2832 	u16		tx_latency = 0;
2833 	int		tx_bytes;
2834 
2835 
2836 	/* Idle, do nothing */
2837 	if (txr->bytes == 0)
2838 		return;
2839 
2840 	if (ixl_dynamic_tx_itr) {
2841 		tx_bytes = txr->bytes/txr->itr;
2842 		tx_itr = txr->itr;
2843 
2844 		switch (txr->latency) {
2845 		case IXL_LOW_LATENCY:
2846 			if (tx_bytes > 10) {
2847 				tx_latency = IXL_AVE_LATENCY;
2848 				tx_itr = IXL_ITR_20K;
2849 			}
2850 			break;
2851 		case IXL_AVE_LATENCY:
2852 			if (tx_bytes > 20) {
2853 				tx_latency = IXL_BULK_LATENCY;
2854 				tx_itr = IXL_ITR_8K;
2855 			} else if (tx_bytes <= 10) {
2856 				tx_latency = IXL_LOW_LATENCY;
2857 				tx_itr = IXL_ITR_100K;
2858 			}
2859 			break;
2860 		case IXL_BULK_LATENCY:
2861 			if (tx_bytes <= 20) {
2862 				tx_latency = IXL_AVE_LATENCY;
2863 				tx_itr = IXL_ITR_20K;
2864 			}
2865 			break;
2866 		}
2867 
2868 		txr->latency = tx_latency;
2869 
2870 		if (tx_itr != txr->itr) {
2871 			/* do an exponential smoothing */
2872 			tx_itr = (10 * tx_itr * txr->itr) /
2873 			    ((9 * tx_itr) + txr->itr);
2874 			txr->itr = tx_itr & IXL_MAX_ITR;
2875 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2876 			    que->me), txr->itr);
2877 		}
2878 
2879 	} else { /* We may have toggled to non-dynamic */
2880 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2881 			vsi->tx_itr_setting = ixl_tx_itr;
2882 		/* Update the hardware if needed */
2883 		if (txr->itr != vsi->tx_itr_setting) {
2884 			txr->itr = vsi->tx_itr_setting;
2885 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2886 			    que->me), txr->itr);
2887 		}
2888 	}
2889 	txr->bytes = 0;
2890 	txr->packets = 0;
2891 	return;
2892 }
2893 
2894 
2895 static void
2896 ixl_add_hw_stats(struct ixl_pf *pf)
2897 {
2898 	device_t dev = pf->dev;
2899 	struct ixl_vsi *vsi = &pf->vsi;
2900 	struct ixl_queue *queues = vsi->queues;
2901 	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2902 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2903 
2904 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2905 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2906 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2907 
2908 	struct sysctl_oid *vsi_node, *queue_node;
2909 	struct sysctl_oid_list *vsi_list, *queue_list;
2910 
2911 	struct tx_ring *txr;
2912 	struct rx_ring *rxr;
2913 
2914 	/* Driver statistics */
2915 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2916 			CTLFLAG_RD, &pf->watchdog_events,
2917 			"Watchdog timeouts");
2918 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2919 			CTLFLAG_RD, &pf->admin_irq,
2920 			"Admin Queue IRQ Handled");
2921 
2922 	/* VSI statistics */
2923 #define QUEUE_NAME_LEN 32
2924 	char queue_namebuf[QUEUE_NAME_LEN];
2925 
2926 	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2927 	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2928 	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2929 				   CTLFLAG_RD, NULL, "VSI-specific stats");
2930 	vsi_list = SYSCTL_CHILDREN(vsi_node);
2931 
2932 	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2933 
2934 	/* Queue statistics */
2935 	for (int q = 0; q < vsi->num_queues; q++) {
2936 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2937 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2938 					     CTLFLAG_RD, NULL, "Queue #");
2939 		queue_list = SYSCTL_CHILDREN(queue_node);
2940 
2941 		txr = &(queues[q].txr);
2942 		rxr = &(queues[q].rxr);
2943 
2944 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2945 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2946 				"m_defrag() failed");
2947 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2948 				CTLFLAG_RD, &(queues[q].dropped_pkts),
2949 				"Driver dropped packets");
2950 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2951 				CTLFLAG_RD, &(queues[q].irqs),
2952 				"irqs on this queue");
2953 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2954 				CTLFLAG_RD, &(queues[q].tso),
2955 				"TSO");
2956 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2957 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2958 				"Driver tx dma failure in xmit");
2959 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2960 				CTLFLAG_RD, &(txr->no_desc),
2961 				"Queue No Descriptor Available");
2962 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2963 				CTLFLAG_RD, &(txr->total_packets),
2964 				"Queue Packets Transmitted");
2965 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2966 				CTLFLAG_RD, &(txr->tx_bytes),
2967 				"Queue Bytes Transmitted");
2968 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2969 				CTLFLAG_RD, &(rxr->rx_packets),
2970 				"Queue Packets Received");
2971 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2972 				CTLFLAG_RD, &(rxr->rx_bytes),
2973 				"Queue Bytes Received");
2974 	}
2975 
2976 	/* MAC stats */
2977 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2978 }
2979 
2980 static void
2981 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2982 	struct sysctl_oid_list *child,
2983 	struct i40e_eth_stats *eth_stats)
2984 {
2985 	struct ixl_sysctl_info ctls[] =
2986 	{
2987 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2988 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2989 			"Unicast Packets Received"},
2990 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2991 			"Multicast Packets Received"},
2992 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2993 			"Broadcast Packets Received"},
2994 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2995 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2996 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2997 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
2998 			"Multicast Packets Transmitted"},
2999 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3000 			"Broadcast Packets Transmitted"},
3001 		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3002 		// end
3003 		{0,0,0}
3004 	};
3005 
3006 	struct ixl_sysctl_info *entry = ctls;
3007 	while (entry->stat != 0)
3008 	{
3009 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3010 				CTLFLAG_RD, entry->stat,
3011 				entry->description);
3012 		entry++;
3013 	}
3014 }
3015 
3016 static void
3017 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3018 	struct sysctl_oid_list *child,
3019 	struct i40e_hw_port_stats *stats)
3020 {
3021 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3022 				    CTLFLAG_RD, NULL, "Mac Statistics");
3023 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3024 
3025 	struct i40e_eth_stats *eth_stats = &stats->eth;
3026 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3027 
3028 	struct ixl_sysctl_info ctls[] =
3029 	{
3030 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3031 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3032 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3033 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3034 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3035 		/* Packet Reception Stats */
3036 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3037 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3038 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3039 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3040 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3041 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3042 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3043 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3044 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3045 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3046 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3047 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3048 		/* Packet Transmission Stats */
3049 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3050 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3051 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3052 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3053 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3054 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3055 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3056 		/* Flow control */
3057 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3058 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3059 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3060 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3061 		/* End */
3062 		{0,0,0}
3063 	};
3064 
3065 	struct ixl_sysctl_info *entry = ctls;
3066 	while (entry->stat != 0)
3067 	{
3068 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3069 				CTLFLAG_RD, entry->stat,
3070 				entry->description);
3071 		entry++;
3072 	}
3073 }
3074 
3075 /*
3076 ** ixl_config_rss - setup RSS
3077 **  - note this is done for the single vsi
3078 */
3079 static void
ixl_config_rss(struct ixl_vsi *vsi)
3080 {
3081 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3082 	struct i40e_hw	*hw = vsi->hw;
3083 	u32		lut = 0;
3084 	u64		set_hena, hena;
3085 	int		i, j;
3086 
3087 	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3088 	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3089 	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3090 	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3091 
3092 	/* Fill out hash function seed */
3093 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3094                 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3095 
3096 	/* Enable PCTYPES for RSS: */
3097 	set_hena =
3098 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3099 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3100 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3101 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3102 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3103 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3104 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3105 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3106 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3107 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3108 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3109 
3110 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3111 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3112 	hena |= set_hena;
3113 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3114 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3115 
3116 	/* Populate the LUT with max no. of queues in round robin fashion */
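	/*
	** Example with num_queues == 4: entries cycle 0,1,2,3 and
	** the 4-byte window is flushed every fourth iteration, so
	** each write below is HLUT(n) = 0x00010203, repeated across
	** the whole table.
	*/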
3117 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3118 		if (j == vsi->num_queues)
3119 			j = 0;
3120 		/* lut = 4-byte sliding window of 4 lut entries */
3121 		lut = (lut << 8) | (j &
3122 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3123 		/* On i = 3, we have 4 entries in lut; write to the register */
3124 		if ((i & 3) == 3)
3125 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3126 	}
3127 	ixl_flush(hw);
3128 }
3129 
3130 
3131 /*
3132 ** This routine is run via a vlan config EVENT;
3133 ** it enables us to use the HW Filter table since
3134 ** we can get the vlan id. This just creates the
3135 ** entry in the soft version of the VFTA; init will
3136 ** repopulate the real table.
3137 */
3138 static void
3139 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3140 {
3141 	struct ixl_vsi	*vsi = ifp->if_softc;
3142 	struct i40e_hw	*hw = vsi->hw;
3143 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3144 
3145 	if (ifp->if_softc !=  arg)   /* Not our event */
3146 		return;
3147 
3148 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3149 		return;
3150 
3151 	IXL_PF_LOCK(pf);
3152 	++vsi->num_vlans;
3153 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3154 	IXL_PF_UNLOCK(pf);
3155 }
3156 
3157 /*
3158 ** This routine is run via a vlan
3159 ** unconfig EVENT; it removes our entry
3160 ** from the soft VFTA.
3161 */
3162 static void
3163 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3164 {
3165 	struct ixl_vsi	*vsi = ifp->if_softc;
3166 	struct i40e_hw	*hw = vsi->hw;
3167 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3168 
3169 	if (ifp->if_softc !=  arg)
3170 		return;
3171 
3172 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3173 		return;
3174 
3175 	IXL_PF_LOCK(pf);
3176 	--vsi->num_vlans;
3177 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3178 	IXL_PF_UNLOCK(pf);
3179 }
3180 
3181 /*
3182 ** This routine updates vlan filters; called by init,
3183 ** it scans the filter table and then updates the hw
3184 ** after a soft reset.
3185 */
3186 static void
3187 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3188 {
3189 	struct ixl_mac_filter	*f;
3190 	int			cnt = 0, flags;
3191 
3192 	if (vsi->num_vlans == 0)
3193 		return;
3194 	/*
3195 	** Scan the filter list for vlan entries,
3196 	** mark them for addition and then call
3197 	** for the AQ update.
3198 	*/
3199 	SLIST_FOREACH(f, &vsi->ftl, next) {
3200 		if (f->flags & IXL_FILTER_VLAN) {
3201 			f->flags |=
3202 			    (IXL_FILTER_ADD |
3203 			    IXL_FILTER_USED);
3204 			cnt++;
3205 		}
3206 	}
3207 	if (cnt == 0) {
3208 		printf("setup vlan: no filters found!\n");
3209 		return;
3210 	}
3211 	flags = IXL_FILTER_VLAN;
3212 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3213 	ixl_add_hw_filters(vsi, flags, cnt);
3214 	return;
3215 }
3216 
3217 /*
3218 ** Initialize filter list and add filters that the hardware
3219 ** needs to know about.
3220 */
3221 static void
3222 ixl_init_filters(struct ixl_vsi *vsi)
3223 {
3224 	/* Add broadcast address */
3225 	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3226 	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3227 }
3228 
3229 /*
3230 ** This routine adds multicast filters
3231 */
3232 static void
3233 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3234 {
3235 	struct ixl_mac_filter *f;
3236 
3237 	/* Does one already exist? */
3238 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3239 	if (f != NULL)
3240 		return;
3241 
3242 	f = ixl_get_filter(vsi);
3243 	if (f == NULL) {
3244 		printf("WARNING: no filter available!!\n");
3245 		return;
3246 	}
3247 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3248 	f->vlan = IXL_VLAN_ANY;
3249 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3250 	    | IXL_FILTER_MC);
3251 
3252 	return;
3253 }
3254 
3255 /*
3256 ** This routine adds macvlan filters
3257 */
3258 static void
3259 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3260 {
3261 	struct ixl_mac_filter	*f, *tmp;
3262 	device_t		dev = vsi->dev;
3263 
3264 	DEBUGOUT("ixl_add_filter: begin");
3265 
3266 	/* Does one already exist? */
3267 	f = ixl_find_filter(vsi, macaddr, vlan);
3268 	if (f != NULL)
3269 		return;
3270 	/*
3271 	** Is this the first vlan being registered? If so, we
3272 	** need to remove the ANY filter that indicates we are
3273 	** not in a vlan, and replace it with a 0 filter.
3274 	*/
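	/*
	** Example: the interface starts with (mac, IXL_VLAN_ANY).
	** Registering vlan 100 bumps num_vlans to 1, so the ANY
	** filter is deleted, (mac, 0) is added to keep untagged
	** traffic flowing, and then (mac, 100) is added below.
	*/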
3275 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3276 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3277 		if (tmp != NULL) {
3278 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3279 			ixl_add_filter(vsi, macaddr, 0);
3280 		}
3281 	}
3282 
3283 	f = ixl_get_filter(vsi);
3284 	if (f == NULL) {
3285 		device_printf(dev, "WARNING: no filter available!!\n");
3286 		return;
3287 	}
3288 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3289 	f->vlan = vlan;
3290 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3291 	if (f->vlan != IXL_VLAN_ANY)
3292 		f->flags |= IXL_FILTER_VLAN;
3293 
3294 	ixl_add_hw_filters(vsi, f->flags, 1);
3295 	return;
3296 }
3297 
3298 static void
3299 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3300 {
3301 	struct ixl_mac_filter *f;
3302 
3303 	f = ixl_find_filter(vsi, macaddr, vlan);
3304 	if (f == NULL)
3305 		return;
3306 
3307 	f->flags |= IXL_FILTER_DEL;
3308 	ixl_del_hw_filters(vsi, 1);
3309 
3310 	/* Check if this is the last vlan removal */
3311 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3312 		/* Switch back to a non-vlan filter */
3313 		ixl_del_filter(vsi, macaddr, 0);
3314 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3315 	}
3316 	return;
3317 }
3318 
3319 /*
3320 ** Find the filter with both matching mac addr and vlan id
3321 */
3322 static struct ixl_mac_filter *
3323 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3324 {
3325 	struct ixl_mac_filter	*f;
3326 	bool			match = FALSE;
3327 
3328 	SLIST_FOREACH(f, &vsi->ftl, next) {
3329 		if (!cmp_etheraddr(f->macaddr, macaddr))
3330 			continue;
3331 		if (f->vlan == vlan) {
3332 			match = TRUE;
3333 			break;
3334 		}
3335 	}
3336 
3337 	if (!match)
3338 		f = NULL;
3339 	return (f);
3340 }
3341 
3342 /*
3343 ** This routine takes additions to the vsi filter
3344 ** table and creates an Admin Queue call to create
3345 ** the filters in the hardware.
3346 */
3347 static void
3348 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3349 {
3350 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3351 	struct ixl_mac_filter	*f;
3352 	struct i40e_hw	*hw = vsi->hw;
3353 	device_t	dev = vsi->dev;
3354 	int		err, j = 0;
3355 
3356 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3357 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3358 	if (a == NULL) {
3359 		device_printf(dev, "add hw filter failed to get memory\n");
3360 		return;
3361 	}
3362 
3363 	/*
3364 	** Scan the filter list; each time we find one
3365 	** we add it to the admin queue array and turn off
3366 	** the add bit.
3367 	*/
3368 	SLIST_FOREACH(f, &vsi->ftl, next) {
3369 		if (f->flags == flags) {
3370 			b = &a[j]; // a pox on fvl long names :)
3371 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3372 			b->vlan_tag =
3373 			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3374 			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3375 			f->flags &= ~IXL_FILTER_ADD;
3376 			j++;
3377 		}
3378 		if (j == cnt)
3379 			break;
3380 	}
3381 	if (j > 0) {
3382 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3383 		if (err)
3384 			device_printf(dev, "aq_add_macvlan failure %d\n",
3385 			    hw->aq.asq_last_status);
3386 		else
3387 			vsi->hw_filters_add += j;
3388 	}
3389 	free(a, M_DEVBUF);
3390 	return;
3391 }
3392 
3393 /*
3394 ** This routine takes removals in the vsi filter
3395 ** table and creates an Admin Queue call to delete
3396 ** the filters in the hardware.
3397 */
3398 static void
3399 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3400 {
3401 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3402 	struct i40e_hw		*hw = vsi->hw;
3403 	device_t		dev = vsi->dev;
3404 	struct ixl_mac_filter	*f, *f_temp;
3405 	int			err, j = 0;
3406 
3407 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3408 
3409 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3410 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3411 	if (d == NULL) {
3412 		printf("del hw filter failed to get memory\n");
3413 		return;
3414 	}
3415 
3416 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3417 		if (f->flags & IXL_FILTER_DEL) {
3418 			e = &d[j]; // a pox on fvl long names :)
3419 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3420 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3421 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3422 			/* delete entry from vsi list */
3423 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3424 			free(f, M_DEVBUF);
3425 			j++;
3426 		}
3427 		if (j == cnt)
3428 			break;
3429 	}
3430 	if (j > 0) {
3431 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3432 		/* NOTE: returns ENOENT every time but seems to work fine,
3433 		   so we'll ignore that specific error. */
3434 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3435 			int sc = 0;
3436 			for (int i = 0; i < j; i++)
3437 				sc += (!d[i].error_code);
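			/*
			** e.g. j == 5 requested and 2 entries came
			** back with error_code set: sc == 3, so we
			** log "Failed to remove 2/5 filters" and
			** credit only the 3 actually removed.
			*/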
3438 			vsi->hw_filters_del += sc;
3439 			device_printf(dev,
3440 			    "Failed to remove %d/%d filters, aq error %d\n",
3441 			    j - sc, j, hw->aq.asq_last_status);
3442 		} else
3443 			vsi->hw_filters_del += j;
3444 	}
3445 	free(d, M_DEVBUF);
3446 
3447 	DEBUGOUT("ixl_del_hw_filters: end\n");
3448 	return;
3449 }
3450 
3451 
3452 static void
3453 ixl_enable_rings(struct ixl_vsi *vsi)
3454 {
3455 	struct i40e_hw	*hw = vsi->hw;
3456 	u32		reg;
3457 
3458 	for (int i = 0; i < vsi->num_queues; i++) {
3459 		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3460 
3461 		reg = rd32(hw, I40E_QTX_ENA(i));
3462 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3463 		    I40E_QTX_ENA_QENA_STAT_MASK;
3464 		wr32(hw, I40E_QTX_ENA(i), reg);
3465 		/* Verify the enable took */
3466 		for (int j = 0; j < 10; j++) {
3467 			reg = rd32(hw, I40E_QTX_ENA(i));
3468 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3469 				break;
3470 			i40e_msec_delay(10);
3471 		}
		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
			device_printf(vsi->dev, "TX queue %d disabled!\n", i);
3474 
3475 		reg = rd32(hw, I40E_QRX_ENA(i));
3476 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3477 		    I40E_QRX_ENA_QENA_STAT_MASK;
3478 		wr32(hw, I40E_QRX_ENA(i), reg);
3479 		/* Verify the enable took */
3480 		for (int j = 0; j < 10; j++) {
3481 			reg = rd32(hw, I40E_QRX_ENA(i));
3482 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3483 				break;
3484 			i40e_msec_delay(10);
3485 		}
		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
			device_printf(vsi->dev, "RX queue %d disabled!\n", i);
3488 	}
3489 }
3490 
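/*
** Disable the VSI's TX and RX rings, polling each queue's
** enable status bit until the hardware reports the ring down.
*/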
3491 static void
3492 ixl_disable_rings(struct ixl_vsi *vsi)
3493 {
3494 	struct i40e_hw	*hw = vsi->hw;
3495 	u32		reg;
3496 
3497 	for (int i = 0; i < vsi->num_queues; i++) {
3498 		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3499 		i40e_usec_delay(500);
3500 
3501 		reg = rd32(hw, I40E_QTX_ENA(i));
3502 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3503 		wr32(hw, I40E_QTX_ENA(i), reg);
3504 		/* Verify the disable took */
3505 		for (int j = 0; j < 10; j++) {
3506 			reg = rd32(hw, I40E_QTX_ENA(i));
3507 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3508 				break;
3509 			i40e_msec_delay(10);
3510 		}
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			device_printf(vsi->dev, "TX queue %d still enabled!\n", i);
3513 
3514 		reg = rd32(hw, I40E_QRX_ENA(i));
3515 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3516 		wr32(hw, I40E_QRX_ENA(i), reg);
3517 		/* Verify the disable took */
3518 		for (int j = 0; j < 10; j++) {
3519 			reg = rd32(hw, I40E_QRX_ENA(i));
3520 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3521 				break;
3522 			i40e_msec_delay(10);
3523 		}
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			device_printf(vsi->dev, "RX queue %d still enabled!\n", i);
3526 	}
3527 }
3528 
/**
 * ixl_handle_mdd_event
 *
 * Called from the interrupt handler to identify possibly
 * malicious VFs (it also detects events caused by the PF itself).
 **/
static void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
3537 	struct i40e_hw *hw = &pf->hw;
3538 	device_t dev = pf->dev;
3539 	bool mdd_detected = false;
3540 	bool pf_mdd_detected = false;
3541 	u32 reg;
3542 
3543 	/* find what triggered the MDD event */
3544 	reg = rd32(hw, I40E_GL_MDET_TX);
3545 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3546 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3547 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3548 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3549 				I40E_GL_MDET_TX_EVENT_SHIFT;
3550 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3551 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3552 		device_printf(dev,
3553 			 "Malicious Driver Detection event 0x%02x"
3554 			 " on TX queue %d pf number 0x%02x\n",
3555 			 event, queue, pf_num);
3556 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3557 		mdd_detected = true;
3558 	}
3559 	reg = rd32(hw, I40E_GL_MDET_RX);
3560 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3561 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3562 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3563 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3564 				I40E_GL_MDET_RX_EVENT_SHIFT;
3565 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3566 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3567 		device_printf(dev,
3568 			 "Malicious Driver Detection event 0x%02x"
3569 			 " on RX queue %d of function 0x%02x\n",
3570 			 event, queue, func);
3571 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3572 		mdd_detected = true;
3573 	}
3574 
3575 	if (mdd_detected) {
3576 		reg = rd32(hw, I40E_PF_MDET_TX);
3577 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3578 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			device_printf(dev,
				 "MDD TX event is for this function 0x%08x\n",
				 reg);
3582 			pf_mdd_detected = true;
3583 		}
3584 		reg = rd32(hw, I40E_PF_MDET_RX);
3585 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3586 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			device_printf(dev,
				 "MDD RX event is for this function 0x%08x\n",
				 reg);
3590 			pf_mdd_detected = true;
3591 		}
3592 	}
3593 
3594 	/* re-enable mdd interrupt cause */
3595 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3596 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3597 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3598 	ixl_flush(hw);
3599 }
3600 
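/*
** Enable interrupts: with MSIX, the admin queue vector and
** every queue vector; otherwise the single legacy vector.
*/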
3601 static void
3602 ixl_enable_intr(struct ixl_vsi *vsi)
3603 {
3604 	struct i40e_hw		*hw = vsi->hw;
3605 	struct ixl_queue	*que = vsi->queues;
3606 
3607 	if (ixl_enable_msix) {
3608 		ixl_enable_adminq(hw);
3609 		for (int i = 0; i < vsi->num_queues; i++, que++)
3610 			ixl_enable_queue(hw, que->me);
3611 	} else
3612 		ixl_enable_legacy(hw);
3613 }
3614 
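/*
** Disable interrupts (the counterpart of ixl_enable_intr).
*/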
3615 static void
3616 ixl_disable_intr(struct ixl_vsi *vsi)
3617 {
3618 	struct i40e_hw		*hw = vsi->hw;
3619 	struct ixl_queue	*que = vsi->queues;
3620 
3621 	if (ixl_enable_msix) {
3622 		ixl_disable_adminq(hw);
3623 		for (int i = 0; i < vsi->num_queues; i++, que++)
3624 			ixl_disable_queue(hw, que->me);
3625 	} else
3626 		ixl_disable_legacy(hw);
3627 }
3628 
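/*
** The helpers below gate individual interrupt vectors by
** writing the PFINT_DYN_CTL registers: CTL0 for the admin
** queue/legacy vector, CTLN(id) for per-queue vectors.
*/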
3629 static void
3630 ixl_enable_adminq(struct i40e_hw *hw)
3631 {
3632 	u32		reg;
3633 
3634 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3635 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3636 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3637 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3638 	ixl_flush(hw);
3639 	return;
3640 }
3641 
3642 static void
3643 ixl_disable_adminq(struct i40e_hw *hw)
3644 {
3645 	u32		reg;
3646 
3647 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3648 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3649 
3650 	return;
3651 }
3652 
3653 static void
3654 ixl_enable_queue(struct i40e_hw *hw, int id)
3655 {
3656 	u32		reg;
3657 
3658 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3659 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3660 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3661 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3662 }
3663 
3664 static void
3665 ixl_disable_queue(struct i40e_hw *hw, int id)
3666 {
3667 	u32		reg;
3668 
3669 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3670 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3671 
3672 	return;
3673 }
3674 
3675 static void
3676 ixl_enable_legacy(struct i40e_hw *hw)
3677 {
3678 	u32		reg;
3679 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3680 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3681 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3682 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3683 }
3684 
3685 static void
3686 ixl_disable_legacy(struct i40e_hw *hw)
3687 {
3688 	u32		reg;
3689 
3690 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3691 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3692 
3693 	return;
3694 }
3695 
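/*
** Gather the per-port hardware statistics into pf->stats;
** the first values read after driver load are kept as offsets
** so the reported counters start from zero.
*/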
3696 static void
3697 ixl_update_stats_counters(struct ixl_pf *pf)
3698 {
3699 	struct i40e_hw	*hw = &pf->hw;
3700 	struct ixl_vsi *vsi = &pf->vsi;
3701 	struct ifnet	*ifp = vsi->ifp;
3702 
3703 	struct i40e_hw_port_stats *nsd = &pf->stats;
3704 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3705 
3706 	/* Update hw stats */
3707 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3708 			   pf->stat_offsets_loaded,
3709 			   &osd->crc_errors, &nsd->crc_errors);
3710 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3711 			   pf->stat_offsets_loaded,
3712 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3713 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3714 			   I40E_GLPRT_GORCL(hw->port),
3715 			   pf->stat_offsets_loaded,
3716 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3717 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3718 			   I40E_GLPRT_GOTCL(hw->port),
3719 			   pf->stat_offsets_loaded,
3720 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3721 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3722 			   pf->stat_offsets_loaded,
3723 			   &osd->eth.rx_discards,
3724 			   &nsd->eth.rx_discards);
3725 	ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
3726 			   pf->stat_offsets_loaded,
3727 			   &osd->eth.tx_discards,
3728 			   &nsd->eth.tx_discards);
3729 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3730 			   I40E_GLPRT_UPRCL(hw->port),
3731 			   pf->stat_offsets_loaded,
3732 			   &osd->eth.rx_unicast,
3733 			   &nsd->eth.rx_unicast);
3734 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3735 			   I40E_GLPRT_UPTCL(hw->port),
3736 			   pf->stat_offsets_loaded,
3737 			   &osd->eth.tx_unicast,
3738 			   &nsd->eth.tx_unicast);
3739 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3740 			   I40E_GLPRT_MPRCL(hw->port),
3741 			   pf->stat_offsets_loaded,
3742 			   &osd->eth.rx_multicast,
3743 			   &nsd->eth.rx_multicast);
3744 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3745 			   I40E_GLPRT_MPTCL(hw->port),
3746 			   pf->stat_offsets_loaded,
3747 			   &osd->eth.tx_multicast,
3748 			   &nsd->eth.tx_multicast);
3749 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3750 			   I40E_GLPRT_BPRCL(hw->port),
3751 			   pf->stat_offsets_loaded,
3752 			   &osd->eth.rx_broadcast,
3753 			   &nsd->eth.rx_broadcast);
3754 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3755 			   I40E_GLPRT_BPTCL(hw->port),
3756 			   pf->stat_offsets_loaded,
3757 			   &osd->eth.tx_broadcast,
3758 			   &nsd->eth.tx_broadcast);
3759 
3760 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3761 			   pf->stat_offsets_loaded,
3762 			   &osd->tx_dropped_link_down,
3763 			   &nsd->tx_dropped_link_down);
3764 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3765 			   pf->stat_offsets_loaded,
3766 			   &osd->mac_local_faults,
3767 			   &nsd->mac_local_faults);
3768 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3769 			   pf->stat_offsets_loaded,
3770 			   &osd->mac_remote_faults,
3771 			   &nsd->mac_remote_faults);
3772 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3773 			   pf->stat_offsets_loaded,
3774 			   &osd->rx_length_errors,
3775 			   &nsd->rx_length_errors);
3776 
3777 	/* Flow control (LFC) stats */
3778 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3779 			   pf->stat_offsets_loaded,
3780 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3781 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3782 			   pf->stat_offsets_loaded,
3783 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3784 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3785 			   pf->stat_offsets_loaded,
3786 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3787 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3788 			   pf->stat_offsets_loaded,
3789 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3790 
3791 	/* Priority flow control stats */
3792 #if 0
3793 	for (int i = 0; i < 8; i++) {
3794 		ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3795 				   pf->stat_offsets_loaded,
3796 				   &osd->priority_xon_rx[i],
3797 				   &nsd->priority_xon_rx[i]);
3798 		ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3799 				   pf->stat_offsets_loaded,
3800 				   &osd->priority_xon_tx[i],
3801 				   &nsd->priority_xon_tx[i]);
3802 		ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3803 				   pf->stat_offsets_loaded,
3804 				   &osd->priority_xoff_tx[i],
3805 				   &nsd->priority_xoff_tx[i]);
3806 		ixl_stat_update32(hw,
3807 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3808 				   pf->stat_offsets_loaded,
3809 				   &osd->priority_xon_2_xoff[i],
3810 				   &nsd->priority_xon_2_xoff[i]);
3811 	}
3812 #endif
3813 
3814 	/* Packet size stats rx */
3815 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3816 			   I40E_GLPRT_PRC64L(hw->port),
3817 			   pf->stat_offsets_loaded,
3818 			   &osd->rx_size_64, &nsd->rx_size_64);
3819 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3820 			   I40E_GLPRT_PRC127L(hw->port),
3821 			   pf->stat_offsets_loaded,
3822 			   &osd->rx_size_127, &nsd->rx_size_127);
3823 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3824 			   I40E_GLPRT_PRC255L(hw->port),
3825 			   pf->stat_offsets_loaded,
3826 			   &osd->rx_size_255, &nsd->rx_size_255);
3827 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3828 			   I40E_GLPRT_PRC511L(hw->port),
3829 			   pf->stat_offsets_loaded,
3830 			   &osd->rx_size_511, &nsd->rx_size_511);
3831 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3832 			   I40E_GLPRT_PRC1023L(hw->port),
3833 			   pf->stat_offsets_loaded,
3834 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3835 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3836 			   I40E_GLPRT_PRC1522L(hw->port),
3837 			   pf->stat_offsets_loaded,
3838 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3839 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3840 			   I40E_GLPRT_PRC9522L(hw->port),
3841 			   pf->stat_offsets_loaded,
3842 			   &osd->rx_size_big, &nsd->rx_size_big);
3843 
3844 	/* Packet size stats tx */
3845 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3846 			   I40E_GLPRT_PTC64L(hw->port),
3847 			   pf->stat_offsets_loaded,
3848 			   &osd->tx_size_64, &nsd->tx_size_64);
3849 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3850 			   I40E_GLPRT_PTC127L(hw->port),
3851 			   pf->stat_offsets_loaded,
3852 			   &osd->tx_size_127, &nsd->tx_size_127);
3853 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3854 			   I40E_GLPRT_PTC255L(hw->port),
3855 			   pf->stat_offsets_loaded,
3856 			   &osd->tx_size_255, &nsd->tx_size_255);
3857 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3858 			   I40E_GLPRT_PTC511L(hw->port),
3859 			   pf->stat_offsets_loaded,
3860 			   &osd->tx_size_511, &nsd->tx_size_511);
3861 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3862 			   I40E_GLPRT_PTC1023L(hw->port),
3863 			   pf->stat_offsets_loaded,
3864 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3865 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3866 			   I40E_GLPRT_PTC1522L(hw->port),
3867 			   pf->stat_offsets_loaded,
3868 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3869 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3870 			   I40E_GLPRT_PTC9522L(hw->port),
3871 			   pf->stat_offsets_loaded,
3872 			   &osd->tx_size_big, &nsd->tx_size_big);
3873 
3874 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3875 			   pf->stat_offsets_loaded,
3876 			   &osd->rx_undersize, &nsd->rx_undersize);
3877 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3878 			   pf->stat_offsets_loaded,
3879 			   &osd->rx_fragments, &nsd->rx_fragments);
3880 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3881 			   pf->stat_offsets_loaded,
3882 			   &osd->rx_oversize, &nsd->rx_oversize);
3883 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3884 			   pf->stat_offsets_loaded,
3885 			   &osd->rx_jabber, &nsd->rx_jabber);
3886 	pf->stat_offsets_loaded = true;
3887 	/* End hw stats */
3888 
3889 	/* Update vsi stats */
3890 	ixl_update_eth_stats(vsi);
3891 
3892 	/* OS statistics */
3893 	// ERJ - these are per-port, update all vsis?
3894 	ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
3895 }
3896 
/*
** Taskqueue handler for MSIX Adminq interrupts
**  - done outside of interrupt context since it might sleep
*/
3901 static void
3902 ixl_do_adminq(void *context, int pending)
3903 {
3904 	struct ixl_pf			*pf = context;
3905 	struct i40e_hw			*hw = &pf->hw;
3906 	struct ixl_vsi			*vsi = &pf->vsi;
3907 	struct i40e_arq_event_info	event;
3908 	i40e_status			ret;
3909 	u32				reg, loop = 0;
3910 	u16				opcode, result;
3911 
3912 	event.msg_len = IXL_AQ_BUF_SZ;
3913 	event.msg_buf = malloc(event.msg_len,
3914 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3915 	if (!event.msg_buf) {
		device_printf(pf->dev, "Unable to allocate adminq memory\n");
3917 		return;
3918 	}
3919 
3920 	/* clean and process any events */
3921 	do {
3922 		ret = i40e_clean_arq_element(hw, &event, &result);
3923 		if (ret)
3924 			break;
3925 		opcode = LE16_TO_CPU(event.desc.opcode);
3926 		switch (opcode) {
3927 		case i40e_aqc_opc_get_link_status:
3928 			vsi->link_up = ixl_config_link(hw);
3929 			ixl_update_link_status(pf);
3930 			break;
3931 		case i40e_aqc_opc_send_msg_to_pf:
3932 			/* process pf/vf communication here */
3933 			break;
3934 		case i40e_aqc_opc_event_lan_overflow:
3935 			break;
3936 		default:
3937 #ifdef IXL_DEBUG
3938 			printf("AdminQ unknown event %x\n", opcode);
3939 #endif
3940 			break;
3941 		}
3942 
3943 	} while (result && (loop++ < IXL_ADM_LIMIT));
3944 
3945 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3946 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3947 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3948 	free(event.msg_buf, M_DEVBUF);
3949 
3950 	if (pf->msix > 1)
3951 		ixl_enable_adminq(&pf->hw);
3952 	else
3953 		ixl_enable_intr(vsi);
3954 }
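/*
** Sysctl handler that dumps driver debug state to the console
** when written with the value 1; for example, assuming the oid
** is attached as dev.ixl.<unit>.debug:
**
**	# sysctl dev.ixl.0.debug=1
*/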
3955 
3956 static int
3957 ixl_debug_info(SYSCTL_HANDLER_ARGS)
3958 {
3959 	struct ixl_pf	*pf;
3960 	int		error, input = 0;
3961 
3962 	error = sysctl_handle_int(oidp, &input, 0, req);
3963 
3964 	if (error || !req->newptr)
3965 		return (error);
3966 
3967 	if (input == 1) {
3968 		pf = (struct ixl_pf *)arg1;
3969 		ixl_print_debug_info(pf);
3970 	}
3971 
3972 	return (error);
3973 }
3974 
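/*
** Print a snapshot of queue counters and selected hardware
** statistics registers to the console.
*/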
3975 static void
3976 ixl_print_debug_info(struct ixl_pf *pf)
3977 {
3978 	struct i40e_hw		*hw = &pf->hw;
3979 	struct ixl_vsi		*vsi = &pf->vsi;
3980 	struct ixl_queue	*que = vsi->queues;
3981 	struct rx_ring		*rxr = &que->rxr;
3982 	struct tx_ring		*txr = &que->txr;
3983 	u32			reg;
3984 
3986 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
3987 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
3988 	printf("RX next check = %x\n", rxr->next_check);
3989 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
3990 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
3991 	printf("TX desc avail = %x\n", txr->avail);
3992 
	reg = rd32(hw, I40E_GLV_GORCL(0xc));
	printf("RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
	printf("Port RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLV_RDPC(0xc));
	printf("RX discard = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
	printf("Port RX discard = %x\n", reg);

	reg = rd32(hw, I40E_GLV_TEPC(0xc));
	printf("TX errors = %x\n", reg);
	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
	printf("TX Bytes = %x\n", reg);

	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
	printf("RX undersize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
	printf("RX fragments = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
	printf("RX oversize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
	printf("RX length error = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
	printf("mac remote fault = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
	printf("mac local fault = %x\n", reg);
4019 }
4020 
4021 /**
4022  * Update VSI-specific ethernet statistics counters.
4023  **/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
4026 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4027 	struct i40e_hw *hw = &pf->hw;
4028 	struct ifnet *ifp = vsi->ifp;
4029 	struct i40e_eth_stats *es;
4030 	struct i40e_eth_stats *oes;
4031 	u16 stat_idx = vsi->info.stat_counter_idx;
4032 
4033 	es = &vsi->eth_stats;
4034 	oes = &vsi->eth_stats_offsets;
4035 
4036 	/* Gather up the stats that the hw collects */
4037 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4038 			   vsi->stat_offsets_loaded,
4039 			   &oes->tx_errors, &es->tx_errors);
4040 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4041 			   vsi->stat_offsets_loaded,
4042 			   &oes->rx_discards, &es->rx_discards);
4043 
4044 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4045 			   I40E_GLV_GORCL(stat_idx),
4046 			   vsi->stat_offsets_loaded,
4047 			   &oes->rx_bytes, &es->rx_bytes);
4048 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4049 			   I40E_GLV_UPRCL(stat_idx),
4050 			   vsi->stat_offsets_loaded,
4051 			   &oes->rx_unicast, &es->rx_unicast);
4052 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4053 			   I40E_GLV_MPRCL(stat_idx),
4054 			   vsi->stat_offsets_loaded,
4055 			   &oes->rx_multicast, &es->rx_multicast);
4056 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4057 			   I40E_GLV_BPRCL(stat_idx),
4058 			   vsi->stat_offsets_loaded,
4059 			   &oes->rx_broadcast, &es->rx_broadcast);
4060 
4061 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4062 			   I40E_GLV_GOTCL(stat_idx),
4063 			   vsi->stat_offsets_loaded,
4064 			   &oes->tx_bytes, &es->tx_bytes);
4065 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4066 			   I40E_GLV_UPTCL(stat_idx),
4067 			   vsi->stat_offsets_loaded,
4068 			   &oes->tx_unicast, &es->tx_unicast);
4069 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4070 			   I40E_GLV_MPTCL(stat_idx),
4071 			   vsi->stat_offsets_loaded,
4072 			   &oes->tx_multicast, &es->tx_multicast);
4073 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4074 			   I40E_GLV_BPTCL(stat_idx),
4075 			   vsi->stat_offsets_loaded,
4076 			   &oes->tx_broadcast, &es->tx_broadcast);
4077 	vsi->stat_offsets_loaded = true;
4078 
4079 	/* Update ifnet stats */
4080 	ifp->if_ipackets = es->rx_unicast +
4081 	                   es->rx_multicast +
4082 			   es->rx_broadcast;
4083 	ifp->if_opackets = es->tx_unicast +
4084 	                   es->tx_multicast +
4085 			   es->tx_broadcast;
4086 	ifp->if_ibytes = es->rx_bytes;
4087 	ifp->if_obytes = es->tx_bytes;
4088 	ifp->if_imcasts = es->rx_multicast;
4089 	ifp->if_omcasts = es->tx_multicast;
4090 
4091 	ifp->if_oerrors = es->tx_errors;
4092 	ifp->if_iqdrops = es->rx_discards;
4093 	ifp->if_noproto = es->rx_unknown_protocol;
4094 	ifp->if_collisions = 0;
4095 }
4096 
4097 /**
4098  * Reset all of the stats for the given pf
4099  **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
4102 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4103 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4104 	pf->stat_offsets_loaded = false;
4105 }
4106 
4107 /**
4108  * Resets all stats of the given vsi
4109  **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
4112 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4113 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4114 	vsi->stat_offsets_loaded = false;
4115 }
4116 
4117 /**
4118  * Read and update a 48 bit stat from the hw
4119  *
4120  * Since the device stats are not reset at PFReset, they likely will not
4121  * be zeroed when the driver starts.  We'll save the first values read
4122  * and use them as offsets to be subtracted from the raw values in order
4123  * to report stats that count from zero.
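 *
 * For example, if the first read of a counter returns 0x100,
 * 0x100 is saved as the offset and a later raw value of 0x180
 * is reported as 0x80; the (1 << 48) adjustment below handles
 * the counter wrapping past 48 bits between reads.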
4124  **/
4125 static void
4126 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4127 	bool offset_loaded, u64 *offset, u64 *stat)
4128 {
4129 	u64 new_data;
4130 
4131 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4132 	new_data = rd64(hw, loreg);
4133 #else
4134 	/*
4135 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4136 	 * 10 don't support 8 byte bus reads/writes.
4137 	 */
4138 	new_data = rd32(hw, loreg);
4139 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4140 #endif
4141 
4142 	if (!offset_loaded)
4143 		*offset = new_data;
4144 	if (new_data >= *offset)
4145 		*stat = new_data - *offset;
4146 	else
4147 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4148 	*stat &= 0xFFFFFFFFFFFFULL;
4149 }
4150 
4151 /**
4152  * Read and update a 32 bit stat from the hw
4153  **/
4154 static void
4155 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4156 	bool offset_loaded, u64 *offset, u64 *stat)
4157 {
4158 	u32 new_data;
4159 
4160 	new_data = rd32(hw, reg);
4161 	if (!offset_loaded)
4162 		*offset = new_data;
4163 	if (new_data >= *offset)
4164 		*stat = (u32)(new_data - *offset);
4165 	else
4166 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4167 }
4168 
4169 /*
4170 ** Set flow control using sysctl:
4171 ** 	0 - off
4172 **	1 - rx pause
4173 **	2 - tx pause
4174 **	3 - full
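**
** Example usage, assuming the oid is attached under the
** device as dev.ixl.<unit>.fc:
**
**	# sysctl dev.ixl.0.fc=3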
4175 */
4176 static int
4177 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4178 {
4179 	/*
4180 	 * TODO: ensure flow control is disabled if
4181 	 * priority flow control is enabled
4182 	 *
4183 	 * TODO: ensure tx CRC by hardware should be enabled
4184 	 * if tx flow control is enabled.
4185 	 */
4186 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4187 	struct i40e_hw *hw = &pf->hw;
4188 	device_t dev = pf->dev;
4189 	int requested_fc = 0, error = 0;
4190 	enum i40e_status_code aq_error = 0;
4191 	u8 fc_aq_err = 0;
4192 
4193 	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4194 	if (aq_error) {
4195 		device_printf(dev,
4196 		    "%s: Error retrieving link info from aq, %d\n",
4197 		    __func__, aq_error);
4198 		return (EAGAIN);
4199 	}
4200 
4201 	/* Read in new mode */
4202 	requested_fc = hw->fc.current_mode;
4203 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4204 	if ((error) || (req->newptr == NULL))
4205 		return (error);
4206 	if (requested_fc < 0 || requested_fc > 3) {
4207 		device_printf(dev,
4208 		    "Invalid fc mode; valid modes are 0 through 3\n");
4209 		return (EINVAL);
4210 	}
4211 
4212 	/*
4213 	** Changing flow control mode currently does not work on
4214 	** 40GBASE-CR4 PHYs
4215 	*/
4216 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4217 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4218 		device_printf(dev, "Changing flow control mode unsupported"
4219 		    " on 40GBase-CR4 media.\n");
4220 		return (ENODEV);
4221 	}
4222 
4223 	/* Set fc ability for port */
4224 	hw->fc.requested_mode = requested_fc;
4225 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4226 	if (aq_error) {
4227 		device_printf(dev,
4228 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4229 		    __func__, aq_error, fc_aq_err);
4230 		return (EAGAIN);
4231 	}
4232 
4233 	if (hw->fc.current_mode != hw->fc.requested_mode) {
4234 		device_printf(dev, "%s: FC set failure:\n", __func__);
4235 		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4236 		    __func__,
4237 		    ixl_fc_string[hw->fc.current_mode],
4238 		    ixl_fc_string[hw->fc.requested_mode]);
4239 	}
4240 
4241 	return (0);
4242 }
4243 
4244 static int
4245 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4246 {
4247 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4248 	struct i40e_hw *hw = &pf->hw;
4249 	int error = 0, index = 0;
4250 
4251 	char *speeds[] = {
4252 		"Unknown",
4253 		"100M",
4254 		"1G",
4255 		"10G",
4256 		"40G",
4257 		"20G"
4258 	};
4259 
4260 	ixl_update_link_status(pf);
4261 
4262 	switch (hw->phy.link_info.link_speed) {
4263 	case I40E_LINK_SPEED_100MB:
4264 		index = 1;
4265 		break;
4266 	case I40E_LINK_SPEED_1GB:
4267 		index = 2;
4268 		break;
4269 	case I40E_LINK_SPEED_10GB:
4270 		index = 3;
4271 		break;
4272 	case I40E_LINK_SPEED_40GB:
4273 		index = 4;
4274 		break;
4275 	case I40E_LINK_SPEED_20GB:
4276 		index = 5;
4277 		break;
4278 	case I40E_LINK_SPEED_UNKNOWN:
4279 	default:
4280 		index = 0;
4281 		break;
4282 	}
4283 
4284 	error = sysctl_handle_string(oidp, speeds[index],
4285 	    strlen(speeds[index]), req);
4286 	return (error);
4287 }
4288 
4289 /*
4290 ** Control link advertise speed:
4291 **	Flags:
4292 **	0x1 - advertise 100 Mb
4293 **	0x2 - advertise 1G
4294 **	0x4 - advertise 10G
4295 **
4296 ** Does not work on 40G devices.
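**
** Example usage, assuming the oid is attached under the
** device as dev.ixl.<unit>.advertise_speed:
**
**	# sysctl dev.ixl.0.advertise_speed=0x7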
4297 */
4298 static int
4299 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4300 {
4301 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4302 	struct i40e_hw *hw = &pf->hw;
4303 	device_t dev = pf->dev;
4304 	struct i40e_aq_get_phy_abilities_resp abilities;
4305 	struct i40e_aq_set_phy_config config;
4306 	int requested_ls = 0;
4307 	enum i40e_status_code aq_error = 0;
4308 	int error = 0;
4309 
4310 	/*
4311 	** FW doesn't support changing advertised speed
4312 	** for 40G devices; speed is always 40G.
4313 	*/
4314 	if (i40e_is_40G_device(hw->device_id))
4315 		return (ENODEV);
4316 
4317 	/* Read in new mode */
4318 	requested_ls = pf->advertised_speed;
4319 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4320 	if ((error) || (req->newptr == NULL))
4321 		return (error);
4322 	if (requested_ls < 1 || requested_ls > 7) {
4323 		device_printf(dev,
4324 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4325 		return (EINVAL);
4326 	}
4327 
4328 	/* Exit if no change */
4329 	if (pf->advertised_speed == requested_ls)
4330 		return (0);
4331 
4332 	/* Get current capability information */
4333 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4334 	if (aq_error) {
4335 		device_printf(dev, "%s: Error getting phy capabilities %d,"
4336 		    " aq error: %d\n", __func__, aq_error,
4337 		    hw->aq.asq_last_status);
4338 		return (EAGAIN);
4339 	}
4340 
4341 	/* Prepare new config */
4342 	bzero(&config, sizeof(config));
4343 	config.phy_type = abilities.phy_type;
4344 	config.abilities = abilities.abilities
4345 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4346 	config.eee_capability = abilities.eee_capability;
4347 	config.eeer = abilities.eeer_val;
4348 	config.low_power_ctrl = abilities.d3_lpan;
4349 	/* Translate into aq cmd link_speed */
4350 	if (requested_ls & 0x4)
4351 		config.link_speed |= I40E_LINK_SPEED_10GB;
4352 	if (requested_ls & 0x2)
4353 		config.link_speed |= I40E_LINK_SPEED_1GB;
4354 	if (requested_ls & 0x1)
4355 		config.link_speed |= I40E_LINK_SPEED_100MB;
4356 
4357 	/* Do aq command & restart link */
4358 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4359 	if (aq_error) {
4360 		device_printf(dev, "%s: Error setting new phy config %d,"
4361 		    " aq error: %d\n", __func__, aq_error,
4362 		    hw->aq.asq_last_status);
4363 		return (EAGAIN);
4364 	}
4365 
4366 	pf->advertised_speed = requested_ls;
4367 	ixl_update_link_status(pf);
4368 	return (0);
4369 }
4370 
4371 /*
4372 ** Get the width and transaction speed of
4373 ** the bus this adapter is plugged into.
4374 */
4375 static u16
4376 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4377 {
	u16	link;
	u32	offset;

	/* Get the PCI Express Capabilities offset */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);

	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

	switch (link & I40E_PCI_LINK_WIDTH) {
	case I40E_PCI_LINK_WIDTH_1:
		hw->bus.width = i40e_bus_width_pcie_x1;
		break;
	case I40E_PCI_LINK_WIDTH_2:
		hw->bus.width = i40e_bus_width_pcie_x2;
		break;
	case I40E_PCI_LINK_WIDTH_4:
		hw->bus.width = i40e_bus_width_pcie_x4;
		break;
	case I40E_PCI_LINK_WIDTH_8:
		hw->bus.width = i40e_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = i40e_bus_width_unknown;
		break;
	}

	switch (link & I40E_PCI_LINK_SPEED) {
	case I40E_PCI_LINK_SPEED_2500:
		hw->bus.speed = i40e_bus_speed_2500;
		break;
	case I40E_PCI_LINK_SPEED_5000:
		hw->bus.speed = i40e_bus_speed_5000;
		break;
	case I40E_PCI_LINK_SPEED_8000:
		hw->bus.speed = i40e_bus_speed_8000;
		break;
	default:
		hw->bus.speed = i40e_bus_speed_unknown;
		break;
	}

	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
	    (hw->bus.speed < i40e_bus_speed_8000)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device is not sufficient for"
		    " normal operation.\n");
		device_printf(dev, "For expected performance a x8 "
		    "PCIE Gen3 slot is required.\n");
	}

	return (link);
4441 }
4442 
4443 #ifdef IXL_DEBUG
4444 static int
4445 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4446 {
4447 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4448 	struct i40e_hw *hw = &pf->hw;
4449 	struct i40e_link_status link_status;
4450 	char buf[512];
4451 
4452 	enum i40e_status_code aq_error = 0;
4453 
4454 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4455 	if (aq_error) {
4456 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4457 		return (EPERM);
4458 	}
4459 
	snprintf(buf, sizeof(buf), "\n"
4461 	    "PHY Type : %#04x\n"
4462 	    "Speed    : %#04x\n"
4463 	    "Link info: %#04x\n"
4464 	    "AN info  : %#04x\n"
4465 	    "Ext info : %#04x",
4466 	    link_status.phy_type, link_status.link_speed,
4467 	    link_status.link_info, link_status.an_info,
4468 	    link_status.ext_info);
4469 
4470 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4471 }
4472 
4473 static int
4474 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4475 {
4476 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4477 	struct i40e_hw *hw = &pf->hw;
4478 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4479 	char buf[512];
4480 
4481 	enum i40e_status_code aq_error = 0;
4482 
4483 	// TODO: Print out list of qualified modules as well?
4484 	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4485 	if (aq_error) {
4486 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4487 		return (EPERM);
4488 	}
4489 
	snprintf(buf, sizeof(buf), "\n"
4491 	    "PHY Type : %#010x\n"
4492 	    "Speed    : %#04x\n"
4493 	    "Abilities: %#04x\n"
4494 	    "EEE cap  : %#06x\n"
4495 	    "EEER reg : %#010x\n"
4496 	    "D3 Lpan  : %#04x",
4497 	    abilities_resp.phy_type, abilities_resp.link_speed,
4498 	    abilities_resp.abilities, abilities_resp.eee_capability,
4499 	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4500 
4501 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4502 }
4503 
4504 static int
4505 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4506 {
4507 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4508 	struct ixl_vsi *vsi = &pf->vsi;
4509 	struct ixl_mac_filter *f;
4510 	char *buf, *buf_i;
4511 
4512 	int error = 0;
4513 	int ftl_len = 0;
4514 	int ftl_counter = 0;
4515 	int buf_len = 0;
4516 	int entry_len = 42;
4517 
4518 	SLIST_FOREACH(f, &vsi->ftl, next) {
4519 		ftl_len++;
4520 	}
4521 
4522 	if (ftl_len < 1) {
4523 		sysctl_handle_string(oidp, "(none)", 6, req);
4524 		return (0);
4525 	}
4526 
4527 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		device_printf(pf->dev, "Could not allocate filter buffer\n");
		return (ENOMEM);
	}
4529 
4530 	sprintf(buf_i++, "\n");
4531 	SLIST_FOREACH(f, &vsi->ftl, next) {
4532 		sprintf(buf_i,
4533 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4534 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4535 		buf_i += entry_len;
4536 		/* don't print '\n' for last entry */
4537 		if (++ftl_counter != ftl_len) {
4538 			sprintf(buf_i, "\n");
4539 			buf_i++;
4540 		}
4541 	}
4542 
4543 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4544 	if (error)
4545 		printf("sysctl error: %d\n", error);
4546 	free(buf, M_DEVBUF);
4547 	return error;
4548 }
4549 
4550 #define IXL_SW_RES_SIZE 0x14
4551 static int
4552 ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
4553 {
4554 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4555 	struct i40e_hw *hw = &pf->hw;
4556 	device_t dev = pf->dev;
4557 	struct sbuf *buf;
4558 	int error = 0;
4559 
4560 	u8 num_entries;
4561 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4562 
4563 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4564 	if (!buf) {
4565 		device_printf(dev, "Could not allocate sbuf for output.\n");
4566 		return (ENOMEM);
4567 	}
4568 
4569 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4570 				resp,
4571 				IXL_SW_RES_SIZE,
4572 				NULL);
4573 	if (error) {
4574 		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4575 		    __func__, error, hw->aq.asq_last_status);
4576 		sbuf_delete(buf);
4577 		return error;
4578 	}
4579 	device_printf(dev, "Num_entries: %d\n", num_entries);
4580 
4581 	sbuf_cat(buf, "\n");
4582 	sbuf_printf(buf,
4583 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4584 	    "     | (this)     | (all) | (this) | (all)       \n");
4585 	for (int i = 0; i < num_entries; i++) {
4586 		sbuf_printf(buf,
4587 		    "%#4x | %10d   %5d   %6d   %12d",
4588 		    resp[i].resource_type,
4589 		    resp[i].guaranteed,
4590 		    resp[i].total,
4591 		    resp[i].used,
4592 		    resp[i].total_unalloced);
4593 		if (i < num_entries - 1)
4594 			sbuf_cat(buf, "\n");
4595 	}
4596 
4597 	error = sbuf_finish(buf);
4598 	if (error) {
4599 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4600 		sbuf_delete(buf);
4601 		return error;
4602 	}
4603 
4604 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4605 	if (error)
4606 		device_printf(dev, "sysctl error: %d\n", error);
4607 	sbuf_delete(buf);
4608 	return error;
4610 }
4611 
4612 /*
4613 ** Dump TX desc given index.
4614 ** Doesn't work; don't use.
4615 ** TODO: Also needs a queue index input!
4616 **/
4617 static int
4618 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4619 {
4620 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4621 	device_t dev = pf->dev;
4622 	struct sbuf *buf;
4623 	int error = 0;
4624 
	int desc_idx = 0;

	/* Read in index */
	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (EIO); // fix
	if (desc_idx < 0 || desc_idx > 1024) { // fix
		device_printf(dev,
		    "Invalid descriptor index, needs to be < 1024\n"); // fix
		return (EINVAL);
	}

	// Don't use this sysctl yet
	if (TRUE)
		return (ENODEV);

	/* Allocate the sbuf only after validation so the early
	   returns above cannot leak it. */
	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
4650 
4651 	// set to queue 1?
4652 	struct ixl_queue *que = pf->vsi.queues;
4653 	struct tx_ring *txr = &(que[1].txr);
4654 	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4655 
	sbuf_printf(buf, "Que: %d, Desc: %d\n", que[1].me, desc_idx);
4657 	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4658 	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4659 
4660 	error = sbuf_finish(buf);
4661 	if (error) {
4662 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4663 		sbuf_delete(buf);
4664 		return error;
4665 	}
4666 
4667 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4668 	if (error)
4669 		device_printf(dev, "sysctl error: %d\n", error);
4670 	sbuf_delete(buf);
4671 	return error;
4672 }
4673 #endif
4674 
4675