xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 4ce386ff25d77954b8cfa11534f632172e848244)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 #endif
40 
41 #include "ixl.h"
42 #include "ixl_pf.h"
43 
44 #ifdef RSS
45 #include <net/rss_config.h>
46 #endif
47 
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixl_driver_version[] = "1.3.6";
52 
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixl_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62 
63 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {
65 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
67 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
68 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
69 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
70 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
71 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
72 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
73 	/* required last entry */
74 	{0, 0, 0, 0, 0}
75 };
76 
77 /*********************************************************************
78  *  Table of branding strings
79  *********************************************************************/
80 
81 static char    *ixl_strings[] = {
82 	"Intel(R) Ethernet Connection XL710 Driver"
83 };
84 
85 
86 /*********************************************************************
87  *  Function prototypes
88  *********************************************************************/
89 static int      ixl_probe(device_t);
90 static int      ixl_attach(device_t);
91 static int      ixl_detach(device_t);
92 static int      ixl_shutdown(device_t);
93 static int	ixl_get_hw_capabilities(struct ixl_pf *);
94 static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
95 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
96 static void	ixl_init(void *);
97 static void	ixl_init_locked(struct ixl_pf *);
98 static void     ixl_stop(struct ixl_pf *);
99 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
100 static int      ixl_media_change(struct ifnet *);
101 static void     ixl_update_link_status(struct ixl_pf *);
102 static int      ixl_allocate_pci_resources(struct ixl_pf *);
103 static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
104 static int	ixl_setup_stations(struct ixl_pf *);
105 static int	ixl_switch_config(struct ixl_pf *);
106 static int	ixl_initialize_vsi(struct ixl_vsi *);
107 static int	ixl_assign_vsi_msix(struct ixl_pf *);
108 static int	ixl_assign_vsi_legacy(struct ixl_pf *);
109 static int	ixl_init_msix(struct ixl_pf *);
110 static void	ixl_configure_msix(struct ixl_pf *);
111 static void	ixl_configure_itr(struct ixl_pf *);
112 static void	ixl_configure_legacy(struct ixl_pf *);
113 static void	ixl_free_pci_resources(struct ixl_pf *);
114 static void	ixl_local_timer(void *);
115 static int	ixl_setup_interface(device_t, struct ixl_vsi *);
116 static bool	ixl_config_link(struct i40e_hw *);
117 static void	ixl_config_rss(struct ixl_vsi *);
118 static void	ixl_set_queue_rx_itr(struct ixl_queue *);
119 static void	ixl_set_queue_tx_itr(struct ixl_queue *);
120 static int	ixl_set_advertised_speeds(struct ixl_pf *, int);
121 
122 static void	ixl_enable_rings(struct ixl_vsi *);
123 static void	ixl_disable_rings(struct ixl_vsi *);
124 static void     ixl_enable_intr(struct ixl_vsi *);
125 static void     ixl_disable_intr(struct ixl_vsi *);
126 
127 static void     ixl_enable_adminq(struct i40e_hw *);
128 static void     ixl_disable_adminq(struct i40e_hw *);
129 static void     ixl_enable_queue(struct i40e_hw *, int);
130 static void     ixl_disable_queue(struct i40e_hw *, int);
131 static void     ixl_enable_legacy(struct i40e_hw *);
132 static void     ixl_disable_legacy(struct i40e_hw *);
133 
134 static void     ixl_set_promisc(struct ixl_vsi *);
135 static void     ixl_add_multi(struct ixl_vsi *);
136 static void     ixl_del_multi(struct ixl_vsi *);
137 static void	ixl_register_vlan(void *, struct ifnet *, u16);
138 static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
139 static void	ixl_setup_vlan_filters(struct ixl_vsi *);
140 
141 static void	ixl_init_filters(struct ixl_vsi *);
142 static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
143 static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
144 static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
145 static void	ixl_del_hw_filters(struct ixl_vsi *, int);
146 static struct ixl_mac_filter *
147 		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
148 static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
149 
150 /* Sysctl debug interface */
151 static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
152 static void	ixl_print_debug_info(struct ixl_pf *);
153 
154 /* The MSI/X Interrupt handlers */
155 static void	ixl_intr(void *);
156 static void	ixl_msix_que(void *);
157 static void	ixl_msix_adminq(void *);
158 static void	ixl_handle_mdd_event(struct ixl_pf *);
159 
160 /* Deferred interrupt tasklets */
161 static void	ixl_do_adminq(void *, int);
162 
163 /* Sysctl handlers */
164 static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
165 static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
166 static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
167 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
168 
169 /* Statistics */
170 static void     ixl_add_hw_stats(struct ixl_pf *);
171 static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
172 		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
173 static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
174 		    struct sysctl_oid_list *,
175 		    struct i40e_eth_stats *);
176 static void	ixl_update_stats_counters(struct ixl_pf *);
177 static void	ixl_update_eth_stats(struct ixl_vsi *);
178 static void	ixl_pf_reset_stats(struct ixl_pf *);
179 static void	ixl_vsi_reset_stats(struct ixl_vsi *);
180 static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
181 		    u64 *, u64 *);
182 static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
183 		    u64 *, u64 *);
184 
185 #ifdef IXL_DEBUG_SYSCTL
186 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
187 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
188 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
189 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
190 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
191 static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
192 #endif
193 
194 /*********************************************************************
195  *  FreeBSD Device Interface Entry Points
196  *********************************************************************/
197 
198 static device_method_t ixl_methods[] = {
199 	/* Device interface */
200 	DEVMETHOD(device_probe, ixl_probe),
201 	DEVMETHOD(device_attach, ixl_attach),
202 	DEVMETHOD(device_detach, ixl_detach),
203 	DEVMETHOD(device_shutdown, ixl_shutdown),
204 	{0, 0}
205 };
206 
207 static driver_t ixl_driver = {
208 	"ixl", ixl_methods, sizeof(struct ixl_pf),
209 };
210 
211 devclass_t ixl_devclass;
212 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
213 
214 MODULE_DEPEND(ixl, pci, 1, 1, 1);
215 MODULE_DEPEND(ixl, ether, 1, 1, 1);
216 #ifdef DEV_NETMAP
217 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
218 #endif /* DEV_NETMAP */
219 
220 /*
221 ** Global reset mutex
222 */
223 static struct mtx ixl_reset_mtx;
224 
225 /*
226 ** TUNEABLE PARAMETERS:
227 */
228 
229 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
230                    "IXL driver parameters");
231 
232 /*
233  * MSIX should be the default for best performance,
234  * but this allows it to be forced off for testing.
235  */
236 static int ixl_enable_msix = 1;
237 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
238 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
239     "Enable MSI-X interrupts");
240 
241 /*
242 ** Number of descriptors per ring:
243 **   - TX and RX are the same size
244 */
245 static int ixl_ringsz = DEFAULT_RING;
246 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
247 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
248     &ixl_ringsz, 0, "Descriptor Ring Size");
249 
250 /*
251 ** This can be set manually, if left as 0 the
252 ** number of queues will be calculated based
253 ** on cpus and msix vectors available.
254 */
255 int ixl_max_queues = 0;
256 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
257 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
258     &ixl_max_queues, 0, "Number of Queues");
259 
260 /*
261 ** Controls for Interrupt Throttling
262 **	- true/false for dynamic adjustment
263 ** 	- default values for static ITR
264 */
265 int ixl_dynamic_rx_itr = 0;
266 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
267 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
268     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
269 
270 int ixl_dynamic_tx_itr = 0;
271 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
272 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
273     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
274 
275 int ixl_rx_itr = IXL_ITR_8K;
276 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
277 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
278     &ixl_rx_itr, 0, "RX Interrupt Rate");
279 
280 int ixl_tx_itr = IXL_ITR_4K;
281 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
282 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
283     &ixl_tx_itr, 0, "TX Interrupt Rate");
284 
285 #ifdef IXL_FDIR
286 static int ixl_enable_fdir = 1;
287 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
288 /* Rate at which we sample */
289 int ixl_atr_rate = 20;
290 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
291 #endif
292 
293 #ifdef DEV_NETMAP
294 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
295 #include <dev/netmap/if_ixl_netmap.h>
296 #endif /* DEV_NETMAP */
297 
298 static char *ixl_fc_string[6] = {
299 	"None",
300 	"Rx",
301 	"Tx",
302 	"Full",
303 	"Priority",
304 	"Default"
305 };
306 
307 
308 /*********************************************************************
309  *  Device identification routine
310  *
311  *  ixl_probe determines if the driver should be loaded on
312  *  the hardware based on PCI vendor/device id of the device.
313  *
314  *  return BUS_PROBE_DEFAULT on success, positive on failure
315  *********************************************************************/
316 
317 static int
318 ixl_probe(device_t dev)
319 {
320 	ixl_vendor_info_t *ent;
321 
322 	u16	pci_vendor_id, pci_device_id;
323 	u16	pci_subvendor_id, pci_subdevice_id;
324 	char	device_name[256];
325 	static bool lock_init = FALSE;
326 
327 	INIT_DEBUGOUT("ixl_probe: begin");
328 
329 	pci_vendor_id = pci_get_vendor(dev);
330 	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
331 		return (ENXIO);
332 
333 	pci_device_id = pci_get_device(dev);
334 	pci_subvendor_id = pci_get_subvendor(dev);
335 	pci_subdevice_id = pci_get_subdevice(dev);
336 
337 	ent = ixl_vendor_info_array;
338 	while (ent->vendor_id != 0) {
339 		if ((pci_vendor_id == ent->vendor_id) &&
340 		    (pci_device_id == ent->device_id) &&
341 
342 		    ((pci_subvendor_id == ent->subvendor_id) ||
343 		     (ent->subvendor_id == 0)) &&
344 
345 		    ((pci_subdevice_id == ent->subdevice_id) ||
346 		     (ent->subdevice_id == 0))) {
347 			sprintf(device_name, "%s, Version - %s",
348 				ixl_strings[ent->index],
349 				ixl_driver_version);
350 			device_set_desc_copy(dev, device_name);
351 			/* One shot mutex init */
352 			if (lock_init == FALSE) {
353 				lock_init = TRUE;
354 				mtx_init(&ixl_reset_mtx,
355 				    "ixl_reset",
356 				    "IXL RESET Lock", MTX_DEF);
357 			}
358 			return (BUS_PROBE_DEFAULT);
359 		}
360 		ent++;
361 	}
362 	return (ENXIO);
363 }
364 
365 /*********************************************************************
366  *  Device initialization routine
367  *
368  *  The attach entry point is called when the driver is being loaded.
369  *  This routine identifies the type of hardware, allocates all resources
370  *  and initializes the hardware.
371  *
372  *  return 0 on success, positive on failure
373  *********************************************************************/
374 
375 static int
376 ixl_attach(device_t dev)
377 {
378 	struct ixl_pf	*pf;
379 	struct i40e_hw	*hw;
380 	struct ixl_vsi *vsi;
381 	u16		bus;
382 	int             error = 0;
383 
384 	INIT_DEBUGOUT("ixl_attach: begin");
385 
386 	/* Allocate, clear, and link in our primary soft structure */
387 	pf = device_get_softc(dev);
388 	pf->dev = pf->osdep.dev = dev;
389 	hw = &pf->hw;
390 
391 	/*
392 	** Note this assumes we have a single embedded VSI,
393 	** this could be enhanced later to allocate multiple
394 	*/
395 	vsi = &pf->vsi;
396 	vsi->dev = pf->dev;
397 
398 	/* Core Lock Init*/
399 	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
400 
401 	/* Set up the timer callout */
402 	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
403 
404 	/* Set up sysctls */
405 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
408 	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
409 
410 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
411 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
412 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
413 	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
414 
415 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
416 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
417 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
418 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
419 
420 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
421 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
422 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
423 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
424 
425 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
426 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
427 	    OID_AUTO, "rx_itr", CTLFLAG_RW,
428 	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
429 
430 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
431 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
432 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
433 	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
434 
435 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
436 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
437 	    OID_AUTO, "tx_itr", CTLFLAG_RW,
438 	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
439 
440 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
441 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
442 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
443 	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
444 
445 #ifdef IXL_DEBUG_SYSCTL
446 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
449 	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
450 
451 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
454 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
455 
456 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
459 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
460 
461 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
462 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
464 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
465 
466 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
467 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
468 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
469 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
470 
471 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
472 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
473 	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
474 	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
475 #endif
476 
477 	/* Save off the PCI information */
478 	hw->vendor_id = pci_get_vendor(dev);
479 	hw->device_id = pci_get_device(dev);
480 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
481 	hw->subsystem_vendor_id =
482 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
483 	hw->subsystem_device_id =
484 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
485 
486 	hw->bus.device = pci_get_slot(dev);
487 	hw->bus.func = pci_get_function(dev);
488 
489 	/* Do PCI setup - map BAR0, etc */
490 	if (ixl_allocate_pci_resources(pf)) {
491 		device_printf(dev, "Allocation of PCI resources failed\n");
492 		error = ENXIO;
493 		goto err_out;
494 	}
495 
496 	/* Create for initial debugging use */
497 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
498 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
499 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
500 	    ixl_debug_info, "I", "Debug Information");
501 
502 
503 	/* Establish a clean starting point */
504 	i40e_clear_hw(hw);
505 	error = i40e_pf_reset(hw);
506 	if (error) {
507 		device_printf(dev,"PF reset failure %x\n", error);
508 		error = EIO;
509 		goto err_out;
510 	}
511 
512 	/* Set admin queue parameters */
513 	hw->aq.num_arq_entries = IXL_AQ_LEN;
514 	hw->aq.num_asq_entries = IXL_AQ_LEN;
515 	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
516 	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
517 
518 	/* Initialize the shared code */
519 	error = i40e_init_shared_code(hw);
520 	if (error) {
521 		device_printf(dev,"Unable to initialize the shared code\n");
522 		error = EIO;
523 		goto err_out;
524 	}
525 
526 	/* Set up the admin queue */
527 	error = i40e_init_adminq(hw);
528 	if (error) {
529 		device_printf(dev, "The driver for the device stopped "
530 		    "because the NVM image is newer than expected.\n"
531 		    "You must install the most recent version of "
532 		    " the network driver.\n");
533 		goto err_out;
534 	}
535 	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
536 
537         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
538 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
539 		device_printf(dev, "The driver for the device detected "
540 		    "a newer version of the NVM image than expected.\n"
541 		    "Please install the most recent version of the network driver.\n");
542 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
543 	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
544 		device_printf(dev, "The driver for the device detected "
545 		    "an older version of the NVM image than expected.\n"
546 		    "Please update the NVM image.\n");
547 
548 	/* Clear PXE mode */
549 	i40e_clear_pxe_mode(hw);
550 
551 	/* Get capabilities from the device */
552 	error = ixl_get_hw_capabilities(pf);
553 	if (error) {
554 		device_printf(dev, "HW capabilities failure!\n");
555 		goto err_get_cap;
556 	}
557 
558 	/* Set up host memory cache */
559 	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
560 	if (error) {
561 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
562 		goto err_get_cap;
563 	}
564 
565 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
566 	if (error) {
567 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
568 		goto err_mac_hmc;
569 	}
570 
571 	/* Disable LLDP from the firmware */
572 	i40e_aq_stop_lldp(hw, TRUE, NULL);
573 
574 	i40e_get_mac_addr(hw, hw->mac.addr);
575 	error = i40e_validate_mac_addr(hw->mac.addr);
576 	if (error) {
577 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
578 		goto err_mac_hmc;
579 	}
580 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
581 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
582 
583 	/* Set up VSI and queues */
584 	if (ixl_setup_stations(pf) != 0) {
585 		device_printf(dev, "setup stations failed!\n");
586 		error = ENOMEM;
587 		goto err_mac_hmc;
588 	}
589 
590 	/* Initialize mac filter list for VSI */
591 	SLIST_INIT(&vsi->ftl);
592 
593 	/* Set up interrupt routing here */
594 	if (pf->msix > 1)
595 		error = ixl_assign_vsi_msix(pf);
596 	else
597 		error = ixl_assign_vsi_legacy(pf);
598 	if (error)
599 		goto err_late;
600 
601 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
602 	    (hw->aq.fw_maj_ver < 4)) {
603 		i40e_msec_delay(75);
604 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
605 		if (error)
606 			device_printf(dev, "link restart failed, aq_err=%d\n",
607 			    pf->hw.aq.asq_last_status);
608 	}
609 
610 	/* Determine link state */
611 	vsi->link_up = ixl_config_link(hw);
612 
613 	/* Report if Unqualified modules are found */
614 	if ((vsi->link_up == FALSE) &&
615 	    (pf->hw.phy.link_info.link_info &
616 	    I40E_AQ_MEDIA_AVAILABLE) &&
617 	    (!(pf->hw.phy.link_info.an_info &
618 	    I40E_AQ_QUALIFIED_MODULE)))
619 		device_printf(dev, "Link failed because "
620 		    "an unqualified module was detected\n");
621 
622 	/* Setup OS specific network interface */
623 	if (ixl_setup_interface(dev, vsi) != 0) {
624 		device_printf(dev, "interface setup failed!\n");
625 		error = EIO;
626 		goto err_late;
627 	}
628 
629 	error = ixl_switch_config(pf);
630 	if (error) {
631 		device_printf(dev, "Initial switch config failed: %d\n", error);
632 		goto err_mac_hmc;
633 	}
634 
635 	/* Limit phy interrupts to link and modules failure */
636 	error = i40e_aq_set_phy_int_mask(hw,
637 	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
638         if (error)
639 		device_printf(dev, "set phy mask failed: %d\n", error);
640 
641 	/* Get the bus configuration and set the shared code */
642 	bus = ixl_get_bus_info(hw, dev);
643 	i40e_set_pci_config_data(hw, bus);
644 
645 	/* Initialize statistics */
646 	ixl_pf_reset_stats(pf);
647 	ixl_update_stats_counters(pf);
648 	ixl_add_hw_stats(pf);
649 
650 	/* Register for VLAN events */
651 	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
652 	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
653 	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
654 	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
655 
656 
657 #ifdef DEV_NETMAP
658 	ixl_netmap_attach(vsi);
659 #endif /* DEV_NETMAP */
660 	INIT_DEBUGOUT("ixl_attach: end");
661 	return (0);
662 
663 err_late:
664 	if (vsi->ifp != NULL)
665 		if_free(vsi->ifp);
666 err_mac_hmc:
667 	i40e_shutdown_lan_hmc(hw);
668 err_get_cap:
669 	i40e_shutdown_adminq(hw);
670 err_out:
671 	ixl_free_pci_resources(pf);
672 	ixl_free_vsi(vsi);
673 	IXL_PF_LOCK_DESTROY(pf);
674 	return (error);
675 }
676 
677 /*********************************************************************
678  *  Device removal routine
679  *
680  *  The detach entry point is called when the driver is being removed.
681  *  This routine stops the adapter and deallocates all the resources
682  *  that were allocated for driver operation.
683  *
684  *  return 0 on success, positive on failure
685  *********************************************************************/
686 
687 static int
688 ixl_detach(device_t dev)
689 {
690 	struct ixl_pf		*pf = device_get_softc(dev);
691 	struct i40e_hw		*hw = &pf->hw;
692 	struct ixl_vsi		*vsi = &pf->vsi;
693 	struct ixl_queue	*que = vsi->queues;
694 	i40e_status		status;
695 
696 	INIT_DEBUGOUT("ixl_detach: begin");
697 
698 	/* Make sure VLANS are not using driver */
699 	if (vsi->ifp->if_vlantrunk != NULL) {
700 		device_printf(dev,"Vlan in use, detach first\n");
701 		return (EBUSY);
702 	}
703 
704 	ether_ifdetach(vsi->ifp);
705 	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
706 		IXL_PF_LOCK(pf);
707 		ixl_stop(pf);
708 		IXL_PF_UNLOCK(pf);
709 	}
710 
711 	for (int i = 0; i < vsi->num_queues; i++, que++) {
712 		if (que->tq) {
713 			taskqueue_drain(que->tq, &que->task);
714 			taskqueue_drain(que->tq, &que->tx_task);
715 			taskqueue_free(que->tq);
716 		}
717 	}
718 
719 	/* Shutdown LAN HMC */
720 	status = i40e_shutdown_lan_hmc(hw);
721 	if (status)
722 		device_printf(dev,
723 		    "Shutdown LAN HMC failed with code %d\n", status);
724 
725 	/* Shutdown admin queue */
726 	status = i40e_shutdown_adminq(hw);
727 	if (status)
728 		device_printf(dev,
729 		    "Shutdown Admin queue failed with code %d\n", status);
730 
731 	/* Unregister VLAN events */
732 	if (vsi->vlan_attach != NULL)
733 		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
734 	if (vsi->vlan_detach != NULL)
735 		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
736 
737 	callout_drain(&pf->timer);
738 #ifdef DEV_NETMAP
739 	netmap_detach(vsi->ifp);
740 #endif /* DEV_NETMAP */
741 
742 
743 	ixl_free_pci_resources(pf);
744 	bus_generic_detach(dev);
745 	if_free(vsi->ifp);
746 	ixl_free_vsi(vsi);
747 	IXL_PF_LOCK_DESTROY(pf);
748 	return (0);
749 }
750 
751 /*********************************************************************
752  *
753  *  Shutdown entry point
754  *
755  **********************************************************************/
756 
757 static int
758 ixl_shutdown(device_t dev)
759 {
760 	struct ixl_pf *pf = device_get_softc(dev);
761 	IXL_PF_LOCK(pf);
762 	ixl_stop(pf);
763 	IXL_PF_UNLOCK(pf);
764 	return (0);
765 }
766 
767 
768 /*********************************************************************
769  *
770  *  Get the hardware capabilities
771  *
772  **********************************************************************/
773 
774 static int
775 ixl_get_hw_capabilities(struct ixl_pf *pf)
776 {
777 	struct i40e_aqc_list_capabilities_element_resp *buf;
778 	struct i40e_hw	*hw = &pf->hw;
779 	device_t 	dev = pf->dev;
780 	int             error, len;
781 	u16		needed;
782 	bool		again = TRUE;
783 
784 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
785 retry:
786 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
787 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
788 		device_printf(dev, "Unable to allocate cap memory\n");
789                 return (ENOMEM);
790 	}
791 
792 	/* This populates the hw struct */
793         error = i40e_aq_discover_capabilities(hw, buf, len,
794 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
795 	free(buf, M_DEVBUF);
796 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
797 	    (again == TRUE)) {
798 		/* retry once with a larger buffer */
799 		again = FALSE;
800 		len = needed;
801 		goto retry;
802 	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
803 		device_printf(dev, "capability discovery failed: %d\n",
804 		    pf->hw.aq.asq_last_status);
805 		return (ENODEV);
806 	}
807 
808 	/* Capture this PF's starting queue pair */
809 	pf->qbase = hw->func_caps.base_queue;
810 
811 #ifdef IXL_DEBUG
812 	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
813 	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
814 	    hw->pf_id, hw->func_caps.num_vfs,
815 	    hw->func_caps.num_msix_vectors,
816 	    hw->func_caps.num_msix_vectors_vf,
817 	    hw->func_caps.fd_filters_guaranteed,
818 	    hw->func_caps.fd_filters_best_effort,
819 	    hw->func_caps.num_tx_qp,
820 	    hw->func_caps.num_rx_qp,
821 	    hw->func_caps.base_queue);
822 #endif
823 	return (error);
824 }
825 
826 static void
827 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
828 {
829 	device_t 	dev = vsi->dev;
830 
831 	/* Enable/disable TXCSUM/TSO4 */
832 	if (!(ifp->if_capenable & IFCAP_TXCSUM)
833 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
834 		if (mask & IFCAP_TXCSUM) {
835 			ifp->if_capenable |= IFCAP_TXCSUM;
836 			/* enable TXCSUM, restore TSO if previously enabled */
837 			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
838 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
839 				ifp->if_capenable |= IFCAP_TSO4;
840 			}
841 		}
842 		else if (mask & IFCAP_TSO4) {
843 			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
844 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
845 			device_printf(dev,
846 			    "TSO4 requires txcsum, enabling both...\n");
847 		}
848 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
849 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
850 		if (mask & IFCAP_TXCSUM)
851 			ifp->if_capenable &= ~IFCAP_TXCSUM;
852 		else if (mask & IFCAP_TSO4)
853 			ifp->if_capenable |= IFCAP_TSO4;
854 	} else if((ifp->if_capenable & IFCAP_TXCSUM)
855 	    && (ifp->if_capenable & IFCAP_TSO4)) {
856 		if (mask & IFCAP_TXCSUM) {
857 			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
858 			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
859 			device_printf(dev,
860 			    "TSO4 requires txcsum, disabling both...\n");
861 		} else if (mask & IFCAP_TSO4)
862 			ifp->if_capenable &= ~IFCAP_TSO4;
863 	}
864 
865 	/* Enable/disable TXCSUM_IPV6/TSO6 */
866 	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
867 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
868 		if (mask & IFCAP_TXCSUM_IPV6) {
869 			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
870 			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
871 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
872 				ifp->if_capenable |= IFCAP_TSO6;
873 			}
874 		} else if (mask & IFCAP_TSO6) {
875 			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
876 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
877 			device_printf(dev,
878 			    "TSO6 requires txcsum6, enabling both...\n");
879 		}
880 	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
881 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
882 		if (mask & IFCAP_TXCSUM_IPV6)
883 			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
884 		else if (mask & IFCAP_TSO6)
885 			ifp->if_capenable |= IFCAP_TSO6;
886 	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
887 	    && (ifp->if_capenable & IFCAP_TSO6)) {
888 		if (mask & IFCAP_TXCSUM_IPV6) {
889 			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
890 			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
891 			device_printf(dev,
892 			    "TSO6 requires txcsum6, disabling both...\n");
893 		} else if (mask & IFCAP_TSO6)
894 			ifp->if_capenable &= ~IFCAP_TSO6;
895 	}
896 }
897 
898 /*********************************************************************
899  *  Ioctl entry point
900  *
901  *  ixl_ioctl is called when the user wants to configure the
902  *  interface.
903  *
904  *  return 0 on success, positive on failure
905  **********************************************************************/
906 
907 static int
908 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
909 {
910 	struct ixl_vsi	*vsi = ifp->if_softc;
911 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
912 	struct ifreq	*ifr = (struct ifreq *) data;
913 #if defined(INET) || defined(INET6)
914 	struct ifaddr *ifa = (struct ifaddr *)data;
915 	bool		avoid_reset = FALSE;
916 #endif
917 	int             error = 0;
918 
919 	switch (command) {
920 
921         case SIOCSIFADDR:
922 #ifdef INET
923 		if (ifa->ifa_addr->sa_family == AF_INET)
924 			avoid_reset = TRUE;
925 #endif
926 #ifdef INET6
927 		if (ifa->ifa_addr->sa_family == AF_INET6)
928 			avoid_reset = TRUE;
929 #endif
930 #if defined(INET) || defined(INET6)
931 		/*
932 		** Calling init results in link renegotiation,
933 		** so we avoid doing it when possible.
934 		*/
935 		if (avoid_reset) {
936 			ifp->if_flags |= IFF_UP;
937 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
938 				ixl_init(pf);
939 #ifdef INET
940 			if (!(ifp->if_flags & IFF_NOARP))
941 				arp_ifinit(ifp, ifa);
942 #endif
943 		} else
944 			error = ether_ioctl(ifp, command, data);
945 		break;
946 #endif
947 	case SIOCSIFMTU:
948 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
949 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
950 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
951 			error = EINVAL;
952 		} else {
953 			IXL_PF_LOCK(pf);
954 			ifp->if_mtu = ifr->ifr_mtu;
955 			vsi->max_frame_size =
956 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
957 			    + ETHER_VLAN_ENCAP_LEN;
958 			ixl_init_locked(pf);
959 			IXL_PF_UNLOCK(pf);
960 		}
961 		break;
962 	case SIOCSIFFLAGS:
963 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
964 		IXL_PF_LOCK(pf);
965 		if (ifp->if_flags & IFF_UP) {
966 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
967 				if ((ifp->if_flags ^ pf->if_flags) &
968 				    (IFF_PROMISC | IFF_ALLMULTI)) {
969 					ixl_set_promisc(vsi);
970 				}
971 			} else
972 				ixl_init_locked(pf);
973 		} else
974 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
975 				ixl_stop(pf);
976 		pf->if_flags = ifp->if_flags;
977 		IXL_PF_UNLOCK(pf);
978 		break;
979 	case SIOCADDMULTI:
980 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
981 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
982 			IXL_PF_LOCK(pf);
983 			ixl_disable_intr(vsi);
984 			ixl_add_multi(vsi);
985 			ixl_enable_intr(vsi);
986 			IXL_PF_UNLOCK(pf);
987 		}
988 		break;
989 	case SIOCDELMULTI:
990 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
991 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
992 			IXL_PF_LOCK(pf);
993 			ixl_disable_intr(vsi);
994 			ixl_del_multi(vsi);
995 			ixl_enable_intr(vsi);
996 			IXL_PF_UNLOCK(pf);
997 		}
998 		break;
999 	case SIOCSIFMEDIA:
1000 	case SIOCGIFMEDIA:
1001 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1002 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1003 		break;
1004 	case SIOCSIFCAP:
1005 	{
1006 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1007 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1008 
1009 		ixl_cap_txcsum_tso(vsi, ifp, mask);
1010 
1011 		if (mask & IFCAP_RXCSUM)
1012 			ifp->if_capenable ^= IFCAP_RXCSUM;
1013 		if (mask & IFCAP_RXCSUM_IPV6)
1014 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1015 		if (mask & IFCAP_LRO)
1016 			ifp->if_capenable ^= IFCAP_LRO;
1017 		if (mask & IFCAP_VLAN_HWTAGGING)
1018 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1019 		if (mask & IFCAP_VLAN_HWFILTER)
1020 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1021 		if (mask & IFCAP_VLAN_HWTSO)
1022 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1023 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1024 			IXL_PF_LOCK(pf);
1025 			ixl_init_locked(pf);
1026 			IXL_PF_UNLOCK(pf);
1027 		}
1028 		VLAN_CAPABILITIES(ifp);
1029 
1030 		break;
1031 	}
1032 
1033 	default:
1034 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1035 		error = ether_ioctl(ifp, command, data);
1036 		break;
1037 	}
1038 
1039 	return (error);
1040 }
1041 
1042 
1043 /*********************************************************************
1044  *  Init entry point
1045  *
1046  *  This routine is used in two ways. It is used by the stack as
1047  *  init entry point in network interface structure. It is also used
1048  *  by the driver as a hw/sw initialization routine to get to a
1049  *  consistent state.
1050  *
1051  *  return 0 on success, positive on failure
1052  **********************************************************************/
1053 
1054 static void
1055 ixl_init_locked(struct ixl_pf *pf)
1056 {
1057 	struct i40e_hw	*hw = &pf->hw;
1058 	struct ixl_vsi	*vsi = &pf->vsi;
1059 	struct ifnet	*ifp = vsi->ifp;
1060 	device_t 	dev = pf->dev;
1061 	struct i40e_filter_control_settings	filter;
1062 	u8		tmpaddr[ETHER_ADDR_LEN];
1063 	int		ret;
1064 
1065 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1066 	INIT_DEBUGOUT("ixl_init: begin");
1067 	ixl_stop(pf);
1068 
1069 	/* Get the latest mac address... User might use a LAA */
1070 	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1071 	      I40E_ETH_LENGTH_OF_ADDRESS);
1072 	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1073 	    i40e_validate_mac_addr(tmpaddr)) {
1074 		bcopy(tmpaddr, hw->mac.addr,
1075 		    I40E_ETH_LENGTH_OF_ADDRESS);
1076 		ret = i40e_aq_mac_address_write(hw,
1077 		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1078 		    hw->mac.addr, NULL);
1079 		if (ret) {
1080 			device_printf(dev, "LLA address"
1081 			 "change failed!!\n");
1082 			return;
1083 		}
1084 	}
1085 
1086 	/* Set the various hardware offload abilities */
1087 	ifp->if_hwassist = 0;
1088 	if (ifp->if_capenable & IFCAP_TSO)
1089 		ifp->if_hwassist |= CSUM_TSO;
1090 	if (ifp->if_capenable & IFCAP_TXCSUM)
1091 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1092 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1093 		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1094 
1095 	/* Set up the device filtering */
1096 	bzero(&filter, sizeof(filter));
1097 	filter.enable_ethtype = TRUE;
1098 	filter.enable_macvlan = TRUE;
1099 #ifdef IXL_FDIR
1100 	filter.enable_fdir = TRUE;
1101 #endif
1102 	if (i40e_set_filter_control(hw, &filter))
1103 		device_printf(dev, "set_filter_control() failed\n");
1104 
1105 	/* Set up RSS */
1106 	ixl_config_rss(vsi);
1107 
1108 	/*
1109 	** Prepare the VSI: rings, hmc contexts, etc...
1110 	*/
1111 	if (ixl_initialize_vsi(vsi)) {
1112 		device_printf(dev, "initialize vsi failed!!\n");
1113 		return;
1114 	}
1115 
1116 	/* Add protocol filters to list */
1117 	ixl_init_filters(vsi);
1118 
1119 	/* Setup vlan's if needed */
1120 	ixl_setup_vlan_filters(vsi);
1121 
1122 	/* Start the local timer */
1123 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1124 
1125 	/* Set up MSI/X routing and the ITR settings */
1126 	if (ixl_enable_msix) {
1127 		ixl_configure_msix(pf);
1128 		ixl_configure_itr(pf);
1129 	} else
1130 		ixl_configure_legacy(pf);
1131 
1132 	ixl_enable_rings(vsi);
1133 
1134 	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1135 
1136 	/* Set MTU in hardware*/
1137 	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1138 	    TRUE, 0, NULL);
1139 	if (aq_error)
1140 		device_printf(vsi->dev,
1141 			"aq_set_mac_config in init error, code %d\n",
1142 		    aq_error);
1143 
1144 	/* And now turn on interrupts */
1145 	ixl_enable_intr(vsi);
1146 
1147 	/* Now inform the stack we're ready */
1148 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1149 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1150 
1151 	return;
1152 }
1153 
1154 static void
1155 ixl_init(void *arg)
1156 {
1157 	struct ixl_pf *pf = arg;
1158 
1159 	IXL_PF_LOCK(pf);
1160 	ixl_init_locked(pf);
1161 	IXL_PF_UNLOCK(pf);
1162 	return;
1163 }
1164 
1165 /*
1166 **
1167 ** MSIX Interrupt Handlers and Tasklets
1168 **
1169 */
1170 static void
1171 ixl_handle_que(void *context, int pending)
1172 {
1173 	struct ixl_queue *que = context;
1174 	struct ixl_vsi *vsi = que->vsi;
1175 	struct i40e_hw  *hw = vsi->hw;
1176 	struct tx_ring  *txr = &que->txr;
1177 	struct ifnet    *ifp = vsi->ifp;
1178 	bool		more;
1179 
1180 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1181 		more = ixl_rxeof(que, IXL_RX_LIMIT);
1182 		IXL_TX_LOCK(txr);
1183 		ixl_txeof(que);
1184 		if (!drbr_empty(ifp, txr->br))
1185 			ixl_mq_start_locked(ifp, txr);
1186 		IXL_TX_UNLOCK(txr);
1187 		if (more) {
1188 			taskqueue_enqueue(que->tq, &que->task);
1189 			return;
1190 		}
1191 	}
1192 
1193 	/* Reenable this interrupt - hmmm */
1194 	ixl_enable_queue(hw, que->me);
1195 	return;
1196 }
1197 
1198 
1199 /*********************************************************************
1200  *
1201  *  Legacy Interrupt Service routine
1202  *
1203  **********************************************************************/
1204 void
1205 ixl_intr(void *arg)
1206 {
1207 	struct ixl_pf		*pf = arg;
1208 	struct i40e_hw		*hw =  &pf->hw;
1209 	struct ixl_vsi		*vsi = &pf->vsi;
1210 	struct ixl_queue	*que = vsi->queues;
1211 	struct ifnet		*ifp = vsi->ifp;
1212 	struct tx_ring		*txr = &que->txr;
1213         u32			reg, icr0, mask;
1214 	bool			more_tx, more_rx;
1215 
1216 	++que->irqs;
1217 
1218 	/* Protect against spurious interrupts */
1219 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1220 		return;
1221 
1222 	icr0 = rd32(hw, I40E_PFINT_ICR0);
1223 
1224 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1225 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1226 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1227 
1228         mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1229 
1230 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1231 		taskqueue_enqueue(pf->tq, &pf->adminq);
1232 		return;
1233 	}
1234 
1235 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1236 
1237 	IXL_TX_LOCK(txr);
1238 	more_tx = ixl_txeof(que);
1239 	if (!drbr_empty(vsi->ifp, txr->br))
1240 		more_tx = 1;
1241 	IXL_TX_UNLOCK(txr);
1242 
1243 	/* re-enable other interrupt causes */
1244 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1245 
1246 	/* And now the queues */
1247 	reg = rd32(hw, I40E_QINT_RQCTL(0));
1248 	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1249 	wr32(hw, I40E_QINT_RQCTL(0), reg);
1250 
1251 	reg = rd32(hw, I40E_QINT_TQCTL(0));
1252 	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1253 	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1254 	wr32(hw, I40E_QINT_TQCTL(0), reg);
1255 
1256 	ixl_enable_legacy(hw);
1257 
1258 	return;
1259 }
1260 
1261 
1262 /*********************************************************************
1263  *
1264  *  MSIX VSI Interrupt Service routine
1265  *
1266  **********************************************************************/
1267 void
1268 ixl_msix_que(void *arg)
1269 {
1270 	struct ixl_queue	*que = arg;
1271 	struct ixl_vsi	*vsi = que->vsi;
1272 	struct i40e_hw	*hw = vsi->hw;
1273 	struct tx_ring	*txr = &que->txr;
1274 	bool		more_tx, more_rx;
1275 
1276 	/* Protect against spurious interrupts */
1277 	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1278 		return;
1279 
1280 	++que->irqs;
1281 
1282 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1283 
1284 	IXL_TX_LOCK(txr);
1285 	more_tx = ixl_txeof(que);
1286 	/*
1287 	** Make certain that if the stack
1288 	** has anything queued the task gets
1289 	** scheduled to handle it.
1290 	*/
1291 	if (!drbr_empty(vsi->ifp, txr->br))
1292 		more_tx = 1;
1293 	IXL_TX_UNLOCK(txr);
1294 
1295 	ixl_set_queue_rx_itr(que);
1296 	ixl_set_queue_tx_itr(que);
1297 
1298 	if (more_tx || more_rx)
1299 		taskqueue_enqueue(que->tq, &que->task);
1300 	else
1301 		ixl_enable_queue(hw, que->me);
1302 
1303 	return;
1304 }
1305 
1306 
1307 /*********************************************************************
1308  *
1309  *  MSIX Admin Queue Interrupt Service routine
1310  *
1311  **********************************************************************/
1312 static void
1313 ixl_msix_adminq(void *arg)
1314 {
1315 	struct ixl_pf	*pf = arg;
1316 	struct i40e_hw	*hw = &pf->hw;
1317 	u32		reg, mask;
1318 
1319 	++pf->admin_irq;
1320 
1321 	reg = rd32(hw, I40E_PFINT_ICR0);
1322 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1323 
1324 	/* Check on the cause */
1325 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1326 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1327 
1328 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1329 		ixl_handle_mdd_event(pf);
1330 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1331 	}
1332 
1333 	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1334 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1335 
1336 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1337 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1338 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1339 
1340 	taskqueue_enqueue(pf->tq, &pf->adminq);
1341 	return;
1342 }
1343 
1344 /*********************************************************************
1345  *
1346  *  Media Ioctl callback
1347  *
1348  *  This routine is called whenever the user queries the status of
1349  *  the interface using ifconfig.
1350  *
1351  **********************************************************************/
1352 static void
1353 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1354 {
1355 	struct ixl_vsi	*vsi = ifp->if_softc;
1356 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1357 	struct i40e_hw  *hw = &pf->hw;
1358 
1359 	INIT_DEBUGOUT("ixl_media_status: begin");
1360 	IXL_PF_LOCK(pf);
1361 
1362 	ixl_update_link_status(pf);
1363 
1364 	ifmr->ifm_status = IFM_AVALID;
1365 	ifmr->ifm_active = IFM_ETHER;
1366 
1367 	if (!vsi->link_up) {
1368 		IXL_PF_UNLOCK(pf);
1369 		return;
1370 	}
1371 
1372 	ifmr->ifm_status |= IFM_ACTIVE;
1373 	/* Hardware is always full-duplex */
1374 	ifmr->ifm_active |= IFM_FDX;
1375 
1376 	switch (hw->phy.link_info.phy_type) {
1377 		/* 100 M */
1378 		case I40E_PHY_TYPE_100BASE_TX:
1379 			ifmr->ifm_active |= IFM_100_TX;
1380 			break;
1381 		/* 1 G */
1382 		case I40E_PHY_TYPE_1000BASE_T:
1383 			ifmr->ifm_active |= IFM_1000_T;
1384 			break;
1385 		case I40E_PHY_TYPE_1000BASE_SX:
1386 			ifmr->ifm_active |= IFM_1000_SX;
1387 			break;
1388 		case I40E_PHY_TYPE_1000BASE_LX:
1389 			ifmr->ifm_active |= IFM_1000_LX;
1390 			break;
1391 		/* 10 G */
1392 		case I40E_PHY_TYPE_10GBASE_CR1:
1393 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1394 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1395 		/* Using this until a real KR media type */
1396 		case I40E_PHY_TYPE_10GBASE_KR:
1397 		case I40E_PHY_TYPE_10GBASE_KX4:
1398 			ifmr->ifm_active |= IFM_10G_TWINAX;
1399 			break;
1400 		case I40E_PHY_TYPE_10GBASE_SR:
1401 			ifmr->ifm_active |= IFM_10G_SR;
1402 			break;
1403 		case I40E_PHY_TYPE_10GBASE_LR:
1404 			ifmr->ifm_active |= IFM_10G_LR;
1405 			break;
1406 		case I40E_PHY_TYPE_10GBASE_T:
1407 			ifmr->ifm_active |= IFM_10G_T;
1408 			break;
1409 		/* 40 G */
1410 		case I40E_PHY_TYPE_40GBASE_CR4:
1411 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1412 			ifmr->ifm_active |= IFM_40G_CR4;
1413 			break;
1414 		case I40E_PHY_TYPE_40GBASE_SR4:
1415 			ifmr->ifm_active |= IFM_40G_SR4;
1416 			break;
1417 		case I40E_PHY_TYPE_40GBASE_LR4:
1418 			ifmr->ifm_active |= IFM_40G_LR4;
1419 			break;
1420 		/*
1421 		** Set these to CR4 because OS does not
1422 		** have types available yet.
1423 		*/
1424 		case I40E_PHY_TYPE_40GBASE_KR4:
1425 		case I40E_PHY_TYPE_XLAUI:
1426 		case I40E_PHY_TYPE_XLPPI:
1427 		case I40E_PHY_TYPE_40GBASE_AOC:
1428 			ifmr->ifm_active |= IFM_40G_CR4;
1429 			break;
1430 		default:
1431 			ifmr->ifm_active |= IFM_UNKNOWN;
1432 			break;
1433 	}
1434 	/* Report flow control status as well */
1435 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1436 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1437 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1438 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1439 
1440 	IXL_PF_UNLOCK(pf);
1441 
1442 	return;
1443 }
1444 
1445 /*********************************************************************
1446  *
1447  *  Media Ioctl callback
1448  *
1449  *  This routine is called when the user changes speed/duplex using
1450  *  media/mediopt option with ifconfig.
1451  *
1452  **********************************************************************/
1453 static int
1454 ixl_media_change(struct ifnet * ifp)
1455 {
1456 	struct ixl_vsi *vsi = ifp->if_softc;
1457 	struct ifmedia *ifm = &vsi->media;
1458 
1459 	INIT_DEBUGOUT("ixl_media_change: begin");
1460 
1461 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1462 		return (EINVAL);
1463 
1464 	if_printf(ifp, "Media change is currently not supported.\n");
1465 
1466 	return (ENODEV);
1467 }
1468 
1469 
1470 #ifdef IXL_FDIR
1471 /*
1472 ** ATR: Application Targetted Receive - creates a filter
1473 **	based on TX flow info that will keep the receive
1474 **	portion of the flow on the same queue. Based on the
1475 **	implementation this is only available for TCP connections
1476 */
1477 void
1478 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1479 {
1480 	struct ixl_vsi			*vsi = que->vsi;
1481 	struct tx_ring			*txr = &que->txr;
1482 	struct i40e_filter_program_desc	*FDIR;
1483 	u32				ptype, dtype;
1484 	int				idx;
1485 
1486 	/* check if ATR is enabled and sample rate */
1487 	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1488 		return;
1489 	/*
1490 	** We sample all TCP SYN/FIN packets,
1491 	** or at the selected sample rate
1492 	*/
1493 	txr->atr_count++;
1494 	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1495 	    (txr->atr_count < txr->atr_rate))
1496                 return;
1497 	txr->atr_count = 0;
1498 
1499 	/* Get a descriptor to use */
1500 	idx = txr->next_avail;
1501 	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1502 	if (++idx == que->num_desc)
1503 		idx = 0;
1504 	txr->avail--;
1505 	txr->next_avail = idx;
1506 
1507 	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1508 	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1509 
1510 	ptype |= (etype == ETHERTYPE_IP) ?
1511 	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1512 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1513 	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1514 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1515 
1516 	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1517 
1518 	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1519 
1520 	/*
1521 	** We use the TCP TH_FIN as a trigger to remove
1522 	** the filter, otherwise its an update.
1523 	*/
1524 	dtype |= (th->th_flags & TH_FIN) ?
1525 	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1526 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1527 	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1528 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1529 
1530 	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1531 	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1532 
1533 	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1534 	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1535 
1536 	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1537 	FDIR->dtype_cmd_cntindex = htole32(dtype);
1538 	return;
1539 }
1540 #endif
1541 
1542 
1543 static void
1544 ixl_set_promisc(struct ixl_vsi *vsi)
1545 {
1546 	struct ifnet	*ifp = vsi->ifp;
1547 	struct i40e_hw	*hw = vsi->hw;
1548 	int		err, mcnt = 0;
1549 	bool		uni = FALSE, multi = FALSE;
1550 
1551 	if (ifp->if_flags & IFF_ALLMULTI)
1552                 multi = TRUE;
1553 	else { /* Need to count the multicast addresses */
1554 		struct  ifmultiaddr *ifma;
1555 		if_maddr_rlock(ifp);
1556 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1557                         if (ifma->ifma_addr->sa_family != AF_LINK)
1558                                 continue;
1559                         if (mcnt == MAX_MULTICAST_ADDR)
1560                                 break;
1561                         mcnt++;
1562 		}
1563 		if_maddr_runlock(ifp);
1564 	}
1565 
1566 	if (mcnt >= MAX_MULTICAST_ADDR)
1567                 multi = TRUE;
1568         if (ifp->if_flags & IFF_PROMISC)
1569 		uni = TRUE;
1570 
1571 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1572 	    vsi->seid, uni, NULL);
1573 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1574 	    vsi->seid, multi, NULL);
1575 	return;
1576 }
1577 
1578 /*********************************************************************
1579  * 	Filter Routines
1580  *
1581  *	Routines for multicast and vlan filter management.
1582  *
1583  *********************************************************************/
1584 static void
1585 ixl_add_multi(struct ixl_vsi *vsi)
1586 {
1587 	struct	ifmultiaddr	*ifma;
1588 	struct ifnet		*ifp = vsi->ifp;
1589 	struct i40e_hw		*hw = vsi->hw;
1590 	int			mcnt = 0, flags;
1591 
1592 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1593 
1594 	if_maddr_rlock(ifp);
1595 	/*
1596 	** First just get a count, to decide if we
1597 	** we simply use multicast promiscuous.
1598 	*/
1599 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1600 		if (ifma->ifma_addr->sa_family != AF_LINK)
1601 			continue;
1602 		mcnt++;
1603 	}
1604 	if_maddr_runlock(ifp);
1605 
1606 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1607 		/* delete existing MC filters */
1608 		ixl_del_hw_filters(vsi, mcnt);
1609 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1610 		    vsi->seid, TRUE, NULL);
1611 		return;
1612 	}
1613 
1614 	mcnt = 0;
1615 	if_maddr_rlock(ifp);
1616 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1617 		if (ifma->ifma_addr->sa_family != AF_LINK)
1618 			continue;
1619 		ixl_add_mc_filter(vsi,
1620 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1621 		mcnt++;
1622 	}
1623 	if_maddr_runlock(ifp);
1624 	if (mcnt > 0) {
1625 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1626 		ixl_add_hw_filters(vsi, flags, mcnt);
1627 	}
1628 
1629 	IOCTL_DEBUGOUT("ixl_add_multi: end");
1630 	return;
1631 }
1632 
1633 static void
1634 ixl_del_multi(struct ixl_vsi *vsi)
1635 {
1636 	struct ifnet		*ifp = vsi->ifp;
1637 	struct ifmultiaddr	*ifma;
1638 	struct ixl_mac_filter	*f;
1639 	int			mcnt = 0;
1640 	bool		match = FALSE;
1641 
1642 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1643 
1644 	/* Search for removed multicast addresses */
1645 	if_maddr_rlock(ifp);
1646 	SLIST_FOREACH(f, &vsi->ftl, next) {
1647 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1648 			match = FALSE;
1649 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1650 				if (ifma->ifma_addr->sa_family != AF_LINK)
1651 					continue;
1652 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1653 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1654 					match = TRUE;
1655 					break;
1656 				}
1657 			}
1658 			if (match == FALSE) {
1659 				f->flags |= IXL_FILTER_DEL;
1660 				mcnt++;
1661 			}
1662 		}
1663 	}
1664 	if_maddr_runlock(ifp);
1665 
1666 	if (mcnt > 0)
1667 		ixl_del_hw_filters(vsi, mcnt);
1668 }
1669 
1670 
1671 /*********************************************************************
1672  *  Timer routine
1673  *
1674  *  This routine checks for link status,updates statistics,
1675  *  and runs the watchdog check.
1676  *
1677  **********************************************************************/
1678 
1679 static void
1680 ixl_local_timer(void *arg)
1681 {
1682 	struct ixl_pf		*pf = arg;
1683 	struct i40e_hw		*hw = &pf->hw;
1684 	struct ixl_vsi		*vsi = &pf->vsi;
1685 	struct ixl_queue	*que = vsi->queues;
1686 	device_t		dev = pf->dev;
1687 	int			hung = 0;
1688 	u32			mask;
1689 
1690 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1691 
1692 	/* Fire off the adminq task */
1693 	taskqueue_enqueue(pf->tq, &pf->adminq);
1694 
1695 	/* Update stats */
1696 	ixl_update_stats_counters(pf);
1697 
1698 	/*
1699 	** Check status of the queues
1700 	*/
1701 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1702 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1703 
1704 	for (int i = 0; i < vsi->num_queues; i++,que++) {
1705 		/* Any queues with outstanding work get a sw irq */
1706 		if (que->busy)
1707 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1708 		/*
1709 		** Each time txeof runs without cleaning, but there
1710 		** are uncleaned descriptors it increments busy. If
1711 		** we get to 5 we declare it hung.
1712 		*/
1713 		if (que->busy == IXL_QUEUE_HUNG) {
1714 			++hung;
1715 			/* Mark the queue as inactive */
1716 			vsi->active_queues &= ~((u64)1 << que->me);
1717 			continue;
1718 		} else {
1719 			/* Check if we've come back from hung */
1720 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1721 				vsi->active_queues |= ((u64)1 << que->me);
1722 		}
1723 		if (que->busy >= IXL_MAX_TX_BUSY) {
1724 #ifdef IXL_DEBUG
1725 			device_printf(dev,"Warning queue %d "
1726 			    "appears to be hung!\n", i);
1727 #endif
1728 			que->busy = IXL_QUEUE_HUNG;
1729 			++hung;
1730 		}
1731 	}
1732 	/* Only reinit if all queues show hung */
1733 	if (hung == vsi->num_queues)
1734 		goto hung;
1735 
1736 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1737 	return;
1738 
1739 hung:
1740 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1741 	ixl_init_locked(pf);
1742 }
1743 
1744 /*
1745 ** Note: this routine updates the OS on the link state
1746 **	the real check of the hardware only happens with
1747 **	a link interrupt.
1748 */
1749 static void
1750 ixl_update_link_status(struct ixl_pf *pf)
1751 {
1752 	struct ixl_vsi		*vsi = &pf->vsi;
1753 	struct i40e_hw		*hw = &pf->hw;
1754 	struct ifnet		*ifp = vsi->ifp;
1755 	device_t		dev = pf->dev;
1756 
1757 
1758 	if (vsi->link_up){
1759 		if (vsi->link_active == FALSE) {
1760 			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1761 			pf->fc = hw->fc.current_mode;
1762 			if (bootverbose) {
1763 				device_printf(dev,"Link is up %d Gbps %s,"
1764 				    " Flow Control: %s\n",
1765 				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1766 				    "Full Duplex", ixl_fc_string[pf->fc]);
1767 			}
1768 			vsi->link_active = TRUE;
1769 			/*
1770 			** Warn user if link speed on NPAR enabled
1771 			** partition is not at least 10GB
1772 			*/
1773 			if (hw->func_caps.npar_enable &&
1774 			   (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1775 			   hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1776 				device_printf(dev, "The partition detected link"
1777 				    "speed that is less than 10Gbps\n");
1778 			if_link_state_change(ifp, LINK_STATE_UP);
1779 		}
1780 	} else { /* Link down */
1781 		if (vsi->link_active == TRUE) {
1782 			if (bootverbose)
1783 				device_printf(dev,"Link is Down\n");
1784 			if_link_state_change(ifp, LINK_STATE_DOWN);
1785 			vsi->link_active = FALSE;
1786 		}
1787 	}
1788 
1789 	return;
1790 }
1791 
1792 /*********************************************************************
1793  *
1794  *  This routine disables all traffic on the adapter by issuing a
1795  *  global reset on the MAC and deallocates TX/RX buffers.
1796  *
1797  **********************************************************************/
1798 
1799 static void
1800 ixl_stop(struct ixl_pf *pf)
1801 {
1802 	struct ixl_vsi	*vsi = &pf->vsi;
1803 	struct ifnet	*ifp = vsi->ifp;
1804 
1805 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1806 
1807 	INIT_DEBUGOUT("ixl_stop: begin\n");
1808 	ixl_disable_intr(vsi);
1809 	ixl_disable_rings(vsi);
1810 
1811 	/* Tell the stack that the interface is no longer active */
1812 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1813 
1814 	/* Stop the local timer */
1815 	callout_stop(&pf->timer);
1816 
1817 	return;
1818 }
1819 
1820 
1821 /*********************************************************************
1822  *
1823  *  Setup MSIX Interrupt resources and handlers for the VSI
1824  *
1825  **********************************************************************/
1826 static int
1827 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1828 {
1829 	device_t        dev = pf->dev;
1830 	struct 		ixl_vsi *vsi = &pf->vsi;
1831 	struct		ixl_queue *que = vsi->queues;
1832 	int 		error, rid = 0;
1833 
1834 	if (pf->msix == 1)
1835 		rid = 1;
1836 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1837 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1838 	if (pf->res == NULL) {
1839 		device_printf(dev,"Unable to allocate"
1840 		    " bus resource: vsi legacy/msi interrupt\n");
1841 		return (ENXIO);
1842 	}
1843 
1844 	/* Set the handler function */
1845 	error = bus_setup_intr(dev, pf->res,
1846 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1847 	    ixl_intr, pf, &pf->tag);
1848 	if (error) {
1849 		pf->res = NULL;
1850 		device_printf(dev, "Failed to register legacy/msi handler");
1851 		return (error);
1852 	}
1853 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1854 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1855 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1856 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1857 	    taskqueue_thread_enqueue, &que->tq);
1858 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1859 	    device_get_nameunit(dev));
1860 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1861 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1862 	    taskqueue_thread_enqueue, &pf->tq);
1863 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1864 	    device_get_nameunit(dev));
1865 
1866 	return (0);
1867 }
1868 
1869 
1870 /*********************************************************************
1871  *
1872  *  Setup MSIX Interrupt resources and handlers for the VSI
1873  *
1874  **********************************************************************/
1875 static int
1876 ixl_assign_vsi_msix(struct ixl_pf *pf)
1877 {
1878 	device_t	dev = pf->dev;
1879 	struct 		ixl_vsi *vsi = &pf->vsi;
1880 	struct 		ixl_queue *que = vsi->queues;
1881 	struct		tx_ring	 *txr;
1882 	int 		error, rid, vector = 0;
1883 #ifdef	RSS
1884 	cpuset_t cpu_mask;
1885 #endif
1886 
1887 	/* Admin Que is vector 0*/
1888 	rid = vector + 1;
1889 	pf->res = bus_alloc_resource_any(dev,
1890     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1891 	if (!pf->res) {
1892 		device_printf(dev,"Unable to allocate"
1893     	    " bus resource: Adminq interrupt [%d]\n", rid);
1894 		return (ENXIO);
1895 	}
1896 	/* Set the adminq vector and handler */
1897 	error = bus_setup_intr(dev, pf->res,
1898 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1899 	    ixl_msix_adminq, pf, &pf->tag);
1900 	if (error) {
1901 		pf->res = NULL;
1902 		device_printf(dev, "Failed to register Admin que handler");
1903 		return (error);
1904 	}
1905 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1906 	pf->admvec = vector;
1907 	/* Tasklet for Admin Queue */
1908 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1909 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1910 	    taskqueue_thread_enqueue, &pf->tq);
1911 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1912 	    device_get_nameunit(pf->dev));
1913 	++vector;
1914 
1915 	/* Now set up the stations */
1916 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1917 		int cpu_id = i;
1918 		rid = vector + 1;
1919 		txr = &que->txr;
1920 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1921 		    RF_SHAREABLE | RF_ACTIVE);
1922 		if (que->res == NULL) {
1923 			device_printf(dev,"Unable to allocate"
1924 		    	    " bus resource: que interrupt [%d]\n", vector);
1925 			return (ENXIO);
1926 		}
1927 		/* Set the handler function */
1928 		error = bus_setup_intr(dev, que->res,
1929 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1930 		    ixl_msix_que, que, &que->tag);
1931 		if (error) {
1932 			que->res = NULL;
1933 			device_printf(dev, "Failed to register que handler");
1934 			return (error);
1935 		}
1936 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1937 		/* Bind the vector to a CPU */
1938 #ifdef RSS
1939 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
1940 #endif
1941 		bus_bind_intr(dev, que->res, cpu_id);
1942 		que->msix = vector;
1943 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1944 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1945 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1946 		    taskqueue_thread_enqueue, &que->tq);
1947 #ifdef RSS
1948 		CPU_SETOF(cpu_id, &cpu_mask);
1949 		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1950 		    &cpu_mask, "%s (bucket %d)",
1951 		    device_get_nameunit(dev), cpu_id);
1952 #else
1953 		taskqueue_start_threads(&que->tq, 1, PI_NET,
1954 		    "%s que", device_get_nameunit(dev));
1955 #endif
1956 	}
1957 
1958 	return (0);
1959 }
1960 
1961 
1962 /*
1963  * Allocate MSI/X vectors
1964  */
1965 static int
1966 ixl_init_msix(struct ixl_pf *pf)
1967 {
1968 	device_t dev = pf->dev;
1969 	int rid, want, vectors, queues, available;
1970 
1971 	/* Override by tuneable */
1972 	if (ixl_enable_msix == 0)
1973 		goto msi;
1974 
1975 	/*
1976 	** When used in a virtualized environment
1977 	** PCI BUSMASTER capability may not be set
1978 	** so explicity set it here and rewrite
1979 	** the ENABLE in the MSIX control register
1980 	** at this point to cause the host to
1981 	** successfully initialize us.
1982 	*/
1983 	{
1984 		u16 pci_cmd_word;
1985 		int msix_ctrl;
1986 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1987 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1988 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1989 		pci_find_cap(dev, PCIY_MSIX, &rid);
1990 		rid += PCIR_MSIX_CTRL;
1991 		msix_ctrl = pci_read_config(dev, rid, 2);
1992 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1993 		pci_write_config(dev, rid, msix_ctrl, 2);
1994 	}
1995 
1996 	/* First try MSI/X */
1997 	rid = PCIR_BAR(IXL_BAR);
1998 	pf->msix_mem = bus_alloc_resource_any(dev,
1999 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2000        	if (!pf->msix_mem) {
2001 		/* May not be enabled */
2002 		device_printf(pf->dev,
2003 		    "Unable to map MSIX table \n");
2004 		goto msi;
2005 	}
2006 
2007 	available = pci_msix_count(dev);
2008 	if (available == 0) { /* system has msix disabled */
2009 		bus_release_resource(dev, SYS_RES_MEMORY,
2010 		    rid, pf->msix_mem);
2011 		pf->msix_mem = NULL;
2012 		goto msi;
2013 	}
2014 
2015 	/* Figure out a reasonable auto config value */
2016 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2017 
2018 	/* Override with hardcoded value if sane */
2019 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2020 		queues = ixl_max_queues;
2021 
2022 #ifdef  RSS
2023 	/* If we're doing RSS, clamp at the number of RSS buckets */
2024 	if (queues > rss_getnumbuckets())
2025 		queues = rss_getnumbuckets();
2026 #endif
2027 
2028 	/*
2029 	** Want one vector (RX/TX pair) per queue
2030 	** plus an additional for the admin queue.
2031 	*/
2032 	want = queues + 1;
2033 	if (want <= available)	/* Have enough */
2034 		vectors = want;
2035 	else {
2036                	device_printf(pf->dev,
2037 		    "MSIX Configuration Problem, "
2038 		    "%d vectors available but %d wanted!\n",
2039 		    available, want);
2040 		return (0); /* Will go to Legacy setup */
2041 	}
2042 
2043 	if (pci_alloc_msix(dev, &vectors) == 0) {
2044                	device_printf(pf->dev,
2045 		    "Using MSIX interrupts with %d vectors\n", vectors);
2046 		pf->msix = vectors;
2047 		pf->vsi.num_queues = queues;
2048 #ifdef RSS
2049 		/*
2050 		 * If we're doing RSS, the number of queues needs to
2051 		 * match the number of RSS buckets that are configured.
2052 		 *
2053 		 * + If there's more queues than RSS buckets, we'll end
2054 		 *   up with queues that get no traffic.
2055 		 *
2056 		 * + If there's more RSS buckets than queues, we'll end
2057 		 *   up having multiple RSS buckets map to the same queue,
2058 		 *   so there'll be some contention.
2059 		 */
2060 		if (queues != rss_getnumbuckets()) {
2061 			device_printf(dev,
2062 			    "%s: queues (%d) != RSS buckets (%d)"
2063 			    "; performance will be impacted.\n",
2064 			    __func__, queues, rss_getnumbuckets());
2065 		}
2066 #endif
2067 		return (vectors);
2068 	}
2069 msi:
2070        	vectors = pci_msi_count(dev);
2071 	pf->vsi.num_queues = 1;
2072 	pf->msix = 1;
2073 	ixl_max_queues = 1;
2074 	ixl_enable_msix = 0;
2075        	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2076                	device_printf(pf->dev,"Using an MSI interrupt\n");
2077 	else {
2078 		pf->msix = 0;
2079                	device_printf(pf->dev,"Using a Legacy interrupt\n");
2080 	}
2081 	return (vectors);
2082 }
2083 
2084 
2085 /*
2086  * Plumb MSI/X vectors
2087  */
2088 static void
2089 ixl_configure_msix(struct ixl_pf *pf)
2090 {
2091 	struct i40e_hw	*hw = &pf->hw;
2092 	struct ixl_vsi *vsi = &pf->vsi;
2093 	u32		reg;
2094 	u16		vector = 1;
2095 
2096 	/* First set up the adminq - vector 0 */
2097 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2098 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2099 
2100 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2101 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2102 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2103 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2104 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2105 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2106 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2107 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2108 
2109 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2110 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2111 
2112 	wr32(hw, I40E_PFINT_DYN_CTL0,
2113 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2114 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2115 
2116 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2117 
2118 	/* Next configure the queues */
2119 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2120 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2121 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2122 
2123 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2124 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2125 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2126 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2127 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2128 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2129 
2130 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2131 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2132 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2133 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2134 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2135 		if (i == (vsi->num_queues - 1))
2136 			reg |= (IXL_QUEUE_EOL
2137 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2138 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2139 	}
2140 }
2141 
2142 /*
2143  * Configure for MSI single vector operation
2144  */
2145 static void
2146 ixl_configure_legacy(struct ixl_pf *pf)
2147 {
2148 	struct i40e_hw	*hw = &pf->hw;
2149 	u32		reg;
2150 
2151 
2152 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2153 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2154 
2155 
2156 	/* Setup "other" causes */
2157 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2158 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2159 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2160 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2161 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2162 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2163 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2164 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2165 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2166 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2167 	    ;
2168 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2169 
2170 	/* SW_ITR_IDX = 0, but don't change INTENA */
2171 	wr32(hw, I40E_PFINT_DYN_CTL0,
2172 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2173 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2174 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2175 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2176 
2177 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2178 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2179 
2180 	/* Associate the queue pair to the vector and enable the q int */
2181 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2182 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2183 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2184 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2185 
2186 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2187 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2188 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2189 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2190 
2191 	/* Next enable the queue pair */
2192 	reg = rd32(hw, I40E_QTX_ENA(0));
2193 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2194 	wr32(hw, I40E_QTX_ENA(0), reg);
2195 
2196 	reg = rd32(hw, I40E_QRX_ENA(0));
2197 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2198 	wr32(hw, I40E_QRX_ENA(0), reg);
2199 }
2200 
2201 
2202 /*
2203  * Set the Initial ITR state
2204  */
2205 static void
2206 ixl_configure_itr(struct ixl_pf *pf)
2207 {
2208 	struct i40e_hw		*hw = &pf->hw;
2209 	struct ixl_vsi		*vsi = &pf->vsi;
2210 	struct ixl_queue	*que = vsi->queues;
2211 
2212 	vsi->rx_itr_setting = ixl_rx_itr;
2213 	if (ixl_dynamic_rx_itr)
2214 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2215 	vsi->tx_itr_setting = ixl_tx_itr;
2216 	if (ixl_dynamic_tx_itr)
2217 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2218 
2219 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2220 		struct tx_ring	*txr = &que->txr;
2221 		struct rx_ring 	*rxr = &que->rxr;
2222 
2223 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2224 		    vsi->rx_itr_setting);
2225 		rxr->itr = vsi->rx_itr_setting;
2226 		rxr->latency = IXL_AVE_LATENCY;
2227 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2228 		    vsi->tx_itr_setting);
2229 		txr->itr = vsi->tx_itr_setting;
2230 		txr->latency = IXL_AVE_LATENCY;
2231 	}
2232 }
2233 
2234 
2235 static int
2236 ixl_allocate_pci_resources(struct ixl_pf *pf)
2237 {
2238 	int             rid;
2239 	device_t        dev = pf->dev;
2240 
2241 	rid = PCIR_BAR(0);
2242 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2243 	    &rid, RF_ACTIVE);
2244 
2245 	if (!(pf->pci_mem)) {
2246 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2247 		return (ENXIO);
2248 	}
2249 
2250 	pf->osdep.mem_bus_space_tag =
2251 		rman_get_bustag(pf->pci_mem);
2252 	pf->osdep.mem_bus_space_handle =
2253 		rman_get_bushandle(pf->pci_mem);
2254 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2255 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2256 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2257 
2258 	pf->hw.back = &pf->osdep;
2259 
2260 	/*
2261 	** Now setup MSI or MSI/X, should
2262 	** return us the number of supported
2263 	** vectors. (Will be 1 for MSI)
2264 	*/
2265 	pf->msix = ixl_init_msix(pf);
2266 	return (0);
2267 }
2268 
2269 static void
2270 ixl_free_pci_resources(struct ixl_pf * pf)
2271 {
2272 	struct ixl_vsi		*vsi = &pf->vsi;
2273 	struct ixl_queue	*que = vsi->queues;
2274 	device_t		dev = pf->dev;
2275 	int			rid, memrid;
2276 
2277 	memrid = PCIR_BAR(IXL_BAR);
2278 
2279 	/* We may get here before stations are setup */
2280 	if ((!ixl_enable_msix) || (que == NULL))
2281 		goto early;
2282 
2283 	/*
2284 	**  Release all msix VSI resources:
2285 	*/
2286 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2287 		rid = que->msix + 1;
2288 		if (que->tag != NULL) {
2289 			bus_teardown_intr(dev, que->res, que->tag);
2290 			que->tag = NULL;
2291 		}
2292 		if (que->res != NULL)
2293 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2294 	}
2295 
2296 early:
2297 	/* Clean the AdminQ interrupt last */
2298 	if (pf->admvec) /* we are doing MSIX */
2299 		rid = pf->admvec + 1;
2300 	else
2301 		(pf->msix != 0) ? (rid = 1):(rid = 0);
2302 
2303 	if (pf->tag != NULL) {
2304 		bus_teardown_intr(dev, pf->res, pf->tag);
2305 		pf->tag = NULL;
2306 	}
2307 	if (pf->res != NULL)
2308 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2309 
2310 	if (pf->msix)
2311 		pci_release_msi(dev);
2312 
2313 	if (pf->msix_mem != NULL)
2314 		bus_release_resource(dev, SYS_RES_MEMORY,
2315 		    memrid, pf->msix_mem);
2316 
2317 	if (pf->pci_mem != NULL)
2318 		bus_release_resource(dev, SYS_RES_MEMORY,
2319 		    PCIR_BAR(0), pf->pci_mem);
2320 
2321 	return;
2322 }
2323 
2324 static void
2325 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2326 {
2327 	/* Display supported media types */
2328 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2329 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2330 
2331 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2332 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2333 
2334 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2335 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2336 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2337 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2338 	    phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2339 	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2340 	    phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2341 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2342 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2343 
2344 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2345 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2346 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2347 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2348 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2349 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2350 
2351 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2352 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2353 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2354 	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2355 	    phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2356 	    /* KR4 uses CR4 until the OS has the real media type */
2357 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2358 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2359 
2360 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2361 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2362 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2363 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2364 }
2365 
2366 /*********************************************************************
2367  *
2368  *  Setup networking device structure and register an interface.
2369  *
2370  **********************************************************************/
2371 static int
2372 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2373 {
2374 	struct ifnet		*ifp;
2375 	struct i40e_hw		*hw = vsi->hw;
2376 	struct ixl_queue	*que = vsi->queues;
2377 	struct i40e_aq_get_phy_abilities_resp abilities;
2378 	enum i40e_status_code aq_error = 0;
2379 
2380 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2381 
2382 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2383 	if (ifp == NULL) {
2384 		device_printf(dev, "can not allocate ifnet structure\n");
2385 		return (-1);
2386 	}
2387 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2388 	ifp->if_mtu = ETHERMTU;
2389 	ifp->if_baudrate = 4000000000;  // ??
2390 	ifp->if_init = ixl_init;
2391 	ifp->if_softc = vsi;
2392 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2393 	ifp->if_ioctl = ixl_ioctl;
2394 
2395 #if __FreeBSD_version >= 1100036
2396 	if_setgetcounterfn(ifp, ixl_get_counter);
2397 #endif
2398 
2399 	ifp->if_transmit = ixl_mq_start;
2400 
2401 	ifp->if_qflush = ixl_qflush;
2402 
2403 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2404 
2405 	vsi->max_frame_size =
2406 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2407 	    + ETHER_VLAN_ENCAP_LEN;
2408 
2409 	/*
2410 	 * Tell the upper layer(s) we support long frames.
2411 	 */
2412 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2413 
2414 	ifp->if_capabilities |= IFCAP_HWCSUM;
2415 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2416 	ifp->if_capabilities |= IFCAP_TSO;
2417 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2418 	ifp->if_capabilities |= IFCAP_LRO;
2419 
2420 	/* VLAN capabilties */
2421 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2422 			     |  IFCAP_VLAN_HWTSO
2423 			     |  IFCAP_VLAN_MTU
2424 			     |  IFCAP_VLAN_HWCSUM;
2425 	ifp->if_capenable = ifp->if_capabilities;
2426 
2427 	/*
2428 	** Don't turn this on by default, if vlans are
2429 	** created on another pseudo device (eg. lagg)
2430 	** then vlan events are not passed thru, breaking
2431 	** operation, but with HW FILTER off it works. If
2432 	** using vlans directly on the ixl driver you can
2433 	** enable this and get full hardware tag filtering.
2434 	*/
2435 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2436 
2437 	/*
2438 	 * Specify the media types supported by this adapter and register
2439 	 * callbacks to update media and link information
2440 	 */
2441 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2442 		     ixl_media_status);
2443 
2444 	aq_error = i40e_aq_get_phy_capabilities(hw,
2445 	    FALSE, TRUE, &abilities, NULL);
2446 	/* May need delay to detect fiber correctly */
2447 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2448 		i40e_msec_delay(200);
2449 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2450 		    TRUE, &abilities, NULL);
2451 	}
2452 	if (aq_error) {
2453 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2454 			device_printf(dev, "Unknown PHY type detected!\n");
2455 		else
2456 			device_printf(dev,
2457 			    "Error getting supported media types, err %d,"
2458 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2459 		return (0);
2460 	}
2461 
2462 	ixl_add_ifmedia(vsi, abilities.phy_type);
2463 
2464 	/* Use autoselect media by default */
2465 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2466 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2467 
2468 	ether_ifattach(ifp, hw->mac.addr);
2469 
2470 	return (0);
2471 }
2472 
2473 static bool
2474 ixl_config_link(struct i40e_hw *hw)
2475 {
2476 	bool check;
2477 
2478 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2479 	check = i40e_get_link_status(hw);
2480 #ifdef IXL_DEBUG
2481 	printf("Link is %s\n", check ? "up":"down");
2482 #endif
2483 	return (check);
2484 }
2485 
2486 /*********************************************************************
2487  *
2488  *  Get Firmware Switch configuration
2489  *	- this will need to be more robust when more complex
2490  *	  switch configurations are enabled.
2491  *
2492  **********************************************************************/
2493 static int
2494 ixl_switch_config(struct ixl_pf *pf)
2495 {
2496 	struct i40e_hw	*hw = &pf->hw;
2497 	struct ixl_vsi	*vsi = &pf->vsi;
2498 	device_t 	dev = vsi->dev;
2499 	struct i40e_aqc_get_switch_config_resp *sw_config;
2500 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2501 	int	ret = I40E_SUCCESS;
2502 	u16	next = 0;
2503 
2504 	memset(&aq_buf, 0, sizeof(aq_buf));
2505 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2506 	ret = i40e_aq_get_switch_config(hw, sw_config,
2507 	    sizeof(aq_buf), &next, NULL);
2508 	if (ret) {
2509 		device_printf(dev,"aq_get_switch_config failed!!\n");
2510 		return (ret);
2511 	}
2512 #ifdef IXL_DEBUG
2513 	printf("Switch config: header reported: %d in structure, %d total\n",
2514     	    sw_config->header.num_reported, sw_config->header.num_total);
2515 	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2516 	    sw_config->element[0].element_type,
2517 	    sw_config->element[0].seid,
2518 	    sw_config->element[0].uplink_seid,
2519 	    sw_config->element[0].downlink_seid);
2520 #endif
2521 	/* Simplified due to a single VSI at the moment */
2522 	vsi->seid = sw_config->element[0].seid;
2523 	return (ret);
2524 }
2525 
2526 /*********************************************************************
2527  *
2528  *  Initialize the VSI:  this handles contexts, which means things
2529  *  			 like the number of descriptors, buffer size,
2530  *			 plus we init the rings thru this function.
2531  *
2532  **********************************************************************/
2533 static int
2534 ixl_initialize_vsi(struct ixl_vsi *vsi)
2535 {
2536 	struct ixl_queue	*que = vsi->queues;
2537 	device_t		dev = vsi->dev;
2538 	struct i40e_hw		*hw = vsi->hw;
2539 	struct i40e_vsi_context	ctxt;
2540 	int			err = 0;
2541 
2542 	memset(&ctxt, 0, sizeof(ctxt));
2543 	ctxt.seid = vsi->seid;
2544 	ctxt.pf_num = hw->pf_id;
2545 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2546 	if (err) {
2547 		device_printf(dev,"get vsi params failed %x!!\n", err);
2548 		return (err);
2549 	}
2550 #ifdef IXL_DEBUG
2551 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2552 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2553 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2554 	    ctxt.uplink_seid, ctxt.vsi_number,
2555 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2556 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2557 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2558 #endif
2559 	/*
2560 	** Set the queue and traffic class bits
2561 	**  - when multiple traffic classes are supported
2562 	**    this will need to be more robust.
2563 	*/
2564 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2565 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2566 	ctxt.info.queue_mapping[0] = 0;
2567 	ctxt.info.tc_mapping[0] = 0x0800;
2568 
2569 	/* Set VLAN receive stripping mode */
2570 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2571 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2572 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2573 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2574 	else
2575 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2576 
2577 	/* Keep copy of VSI info in VSI for statistic counters */
2578 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2579 
2580 	/* Reset VSI statistics */
2581 	ixl_vsi_reset_stats(vsi);
2582 	vsi->hw_filters_add = 0;
2583 	vsi->hw_filters_del = 0;
2584 
2585 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2586 	if (err) {
2587 		device_printf(dev,"update vsi params failed %x!!\n",
2588 		   hw->aq.asq_last_status);
2589 		return (err);
2590 	}
2591 
2592 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2593 		struct tx_ring		*txr = &que->txr;
2594 		struct rx_ring 		*rxr = &que->rxr;
2595 		struct i40e_hmc_obj_txq tctx;
2596 		struct i40e_hmc_obj_rxq rctx;
2597 		u32			txctl;
2598 		u16			size;
2599 
2600 
2601 		/* Setup the HMC TX Context  */
2602 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2603 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2604 		tctx.new_context = 1;
2605 		tctx.base = (txr->dma.pa/128);
2606 		tctx.qlen = que->num_desc;
2607 		tctx.fc_ena = 0;
2608 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2609 		/* Enable HEAD writeback */
2610 		tctx.head_wb_ena = 1;
2611 		tctx.head_wb_addr = txr->dma.pa +
2612 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2613 		tctx.rdylist_act = 0;
2614 		err = i40e_clear_lan_tx_queue_context(hw, i);
2615 		if (err) {
2616 			device_printf(dev, "Unable to clear TX context\n");
2617 			break;
2618 		}
2619 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2620 		if (err) {
2621 			device_printf(dev, "Unable to set TX context\n");
2622 			break;
2623 		}
2624 		/* Associate the ring with this PF */
2625 		txctl = I40E_QTX_CTL_PF_QUEUE;
2626 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2627 		    I40E_QTX_CTL_PF_INDX_MASK);
2628 		wr32(hw, I40E_QTX_CTL(i), txctl);
2629 		ixl_flush(hw);
2630 
2631 		/* Do ring (re)init */
2632 		ixl_init_tx_ring(que);
2633 
2634 		/* Next setup the HMC RX Context  */
2635 		if (vsi->max_frame_size <= 2048)
2636 			rxr->mbuf_sz = MCLBYTES;
2637 		else
2638 			rxr->mbuf_sz = MJUMPAGESIZE;
2639 
2640 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2641 
2642 		/* Set up an RX context for the HMC */
2643 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2644 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2645 		/* ignore header split for now */
2646 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2647 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2648 		    vsi->max_frame_size : max_rxmax;
2649 		rctx.dtype = 0;
2650 		rctx.dsize = 1;	/* do 32byte descriptors */
2651 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2652 		rctx.base = (rxr->dma.pa/128);
2653 		rctx.qlen = que->num_desc;
2654 		rctx.tphrdesc_ena = 1;
2655 		rctx.tphwdesc_ena = 1;
2656 		rctx.tphdata_ena = 0;
2657 		rctx.tphhead_ena = 0;
2658 		rctx.lrxqthresh = 2;
2659 		rctx.crcstrip = 1;
2660 		rctx.l2tsel = 1;
2661 		rctx.showiv = 1;
2662 		rctx.fc_ena = 0;
2663 		rctx.prefena = 1;
2664 
2665 		err = i40e_clear_lan_rx_queue_context(hw, i);
2666 		if (err) {
2667 			device_printf(dev,
2668 			    "Unable to clear RX context %d\n", i);
2669 			break;
2670 		}
2671 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2672 		if (err) {
2673 			device_printf(dev, "Unable to set RX context %d\n", i);
2674 			break;
2675 		}
2676 		err = ixl_init_rx_ring(que);
2677 		if (err) {
2678 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2679 			break;
2680 		}
2681 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2682 #ifdef DEV_NETMAP
2683 		/* preserve queue */
2684 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2685 			struct netmap_adapter *na = NA(vsi->ifp);
2686 			struct netmap_kring *kring = &na->rx_rings[i];
2687 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2688 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2689 		} else
2690 #endif /* DEV_NETMAP */
2691 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2692 	}
2693 	return (err);
2694 }
2695 
2696 
2697 /*********************************************************************
2698  *
2699  *  Free all VSI structs.
2700  *
2701  **********************************************************************/
2702 void
2703 ixl_free_vsi(struct ixl_vsi *vsi)
2704 {
2705 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2706 	struct ixl_queue	*que = vsi->queues;
2707 	struct ixl_mac_filter *f;
2708 
2709 	/* Free station queues */
2710 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2711 		struct tx_ring *txr = &que->txr;
2712 		struct rx_ring *rxr = &que->rxr;
2713 
2714 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2715 			continue;
2716 		IXL_TX_LOCK(txr);
2717 		ixl_free_que_tx(que);
2718 		if (txr->base)
2719 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2720 		IXL_TX_UNLOCK(txr);
2721 		IXL_TX_LOCK_DESTROY(txr);
2722 
2723 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2724 			continue;
2725 		IXL_RX_LOCK(rxr);
2726 		ixl_free_que_rx(que);
2727 		if (rxr->base)
2728 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2729 		IXL_RX_UNLOCK(rxr);
2730 		IXL_RX_LOCK_DESTROY(rxr);
2731 
2732 	}
2733 	free(vsi->queues, M_DEVBUF);
2734 
2735 	/* Free VSI filter list */
2736 	while (!SLIST_EMPTY(&vsi->ftl)) {
2737 		f = SLIST_FIRST(&vsi->ftl);
2738 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2739 		free(f, M_DEVBUF);
2740 	}
2741 }
2742 
2743 
2744 /*********************************************************************
2745  *
2746  *  Allocate memory for the VSI (virtual station interface) and their
2747  *  associated queues, rings and the descriptors associated with each,
2748  *  called only once at attach.
2749  *
2750  **********************************************************************/
2751 static int
2752 ixl_setup_stations(struct ixl_pf *pf)
2753 {
2754 	device_t		dev = pf->dev;
2755 	struct ixl_vsi		*vsi;
2756 	struct ixl_queue	*que;
2757 	struct tx_ring		*txr;
2758 	struct rx_ring		*rxr;
2759 	int 			rsize, tsize;
2760 	int			error = I40E_SUCCESS;
2761 
2762 	vsi = &pf->vsi;
2763 	vsi->back = (void *)pf;
2764 	vsi->hw = &pf->hw;
2765 	vsi->id = 0;
2766 	vsi->num_vlans = 0;
2767 
2768 	/* Get memory for the station queues */
2769         if (!(vsi->queues =
2770             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2771             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2772                 device_printf(dev, "Unable to allocate queue memory\n");
2773                 error = ENOMEM;
2774                 goto early;
2775         }
2776 
2777 	for (int i = 0; i < vsi->num_queues; i++) {
2778 		que = &vsi->queues[i];
2779 		que->num_desc = ixl_ringsz;
2780 		que->me = i;
2781 		que->vsi = vsi;
2782 		/* mark the queue as active */
2783 		vsi->active_queues |= (u64)1 << que->me;
2784 		txr = &que->txr;
2785 		txr->que = que;
2786 		txr->tail = I40E_QTX_TAIL(que->me);
2787 
2788 		/* Initialize the TX lock */
2789 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2790 		    device_get_nameunit(dev), que->me);
2791 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2792 		/* Create the TX descriptor ring */
2793 		tsize = roundup2((que->num_desc *
2794 		    sizeof(struct i40e_tx_desc)) +
2795 		    sizeof(u32), DBA_ALIGN);
2796 		if (i40e_allocate_dma_mem(&pf->hw,
2797 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2798 			device_printf(dev,
2799 			    "Unable to allocate TX Descriptor memory\n");
2800 			error = ENOMEM;
2801 			goto fail;
2802 		}
2803 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2804 		bzero((void *)txr->base, tsize);
2805        		/* Now allocate transmit soft structs for the ring */
2806        		if (ixl_allocate_tx_data(que)) {
2807 			device_printf(dev,
2808 			    "Critical Failure setting up TX structures\n");
2809 			error = ENOMEM;
2810 			goto fail;
2811        		}
2812 		/* Allocate a buf ring */
2813 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2814 		    M_WAITOK, &txr->mtx);
2815 		if (txr->br == NULL) {
2816 			device_printf(dev,
2817 			    "Critical Failure setting up TX buf ring\n");
2818 			error = ENOMEM;
2819 			goto fail;
2820        		}
2821 
2822 		/*
2823 		 * Next the RX queues...
2824 		 */
2825 		rsize = roundup2(que->num_desc *
2826 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2827 		rxr = &que->rxr;
2828 		rxr->que = que;
2829 		rxr->tail = I40E_QRX_TAIL(que->me);
2830 
2831 		/* Initialize the RX side lock */
2832 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2833 		    device_get_nameunit(dev), que->me);
2834 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2835 
2836 		if (i40e_allocate_dma_mem(&pf->hw,
2837 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2838 			device_printf(dev,
2839 			    "Unable to allocate RX Descriptor memory\n");
2840 			error = ENOMEM;
2841 			goto fail;
2842 		}
2843 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2844 		bzero((void *)rxr->base, rsize);
2845 
2846         	/* Allocate receive soft structs for the ring*/
2847 		if (ixl_allocate_rx_data(que)) {
2848 			device_printf(dev,
2849 			    "Critical Failure setting up receive structs\n");
2850 			error = ENOMEM;
2851 			goto fail;
2852 		}
2853 	}
2854 
2855 	return (0);
2856 
2857 fail:
2858 	for (int i = 0; i < vsi->num_queues; i++) {
2859 		que = &vsi->queues[i];
2860 		rxr = &que->rxr;
2861 		txr = &que->txr;
2862 		if (rxr->base)
2863 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2864 		if (txr->base)
2865 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2866 	}
2867 
2868 early:
2869 	return (error);
2870 }
2871 
2872 /*
2873 ** Provide a update to the queue RX
2874 ** interrupt moderation value.
2875 */
2876 static void
2877 ixl_set_queue_rx_itr(struct ixl_queue *que)
2878 {
2879 	struct ixl_vsi	*vsi = que->vsi;
2880 	struct i40e_hw	*hw = vsi->hw;
2881 	struct rx_ring	*rxr = &que->rxr;
2882 	u16		rx_itr;
2883 	u16		rx_latency = 0;
2884 	int		rx_bytes;
2885 
2886 
2887 	/* Idle, do nothing */
2888 	if (rxr->bytes == 0)
2889 		return;
2890 
2891 	if (ixl_dynamic_rx_itr) {
2892 		rx_bytes = rxr->bytes/rxr->itr;
2893 		rx_itr = rxr->itr;
2894 
2895 		/* Adjust latency range */
2896 		switch (rxr->latency) {
2897 		case IXL_LOW_LATENCY:
2898 			if (rx_bytes > 10) {
2899 				rx_latency = IXL_AVE_LATENCY;
2900 				rx_itr = IXL_ITR_20K;
2901 			}
2902 			break;
2903 		case IXL_AVE_LATENCY:
2904 			if (rx_bytes > 20) {
2905 				rx_latency = IXL_BULK_LATENCY;
2906 				rx_itr = IXL_ITR_8K;
2907 			} else if (rx_bytes <= 10) {
2908 				rx_latency = IXL_LOW_LATENCY;
2909 				rx_itr = IXL_ITR_100K;
2910 			}
2911 			break;
2912 		case IXL_BULK_LATENCY:
2913 			if (rx_bytes <= 20) {
2914 				rx_latency = IXL_AVE_LATENCY;
2915 				rx_itr = IXL_ITR_20K;
2916 			}
2917 			break;
2918        		 }
2919 
2920 		rxr->latency = rx_latency;
2921 
2922 		if (rx_itr != rxr->itr) {
2923 			/* do an exponential smoothing */
2924 			rx_itr = (10 * rx_itr * rxr->itr) /
2925 			    ((9 * rx_itr) + rxr->itr);
2926 			rxr->itr = rx_itr & IXL_MAX_ITR;
2927 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2928 			    que->me), rxr->itr);
2929 		}
2930 	} else { /* We may have have toggled to non-dynamic */
2931 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2932 			vsi->rx_itr_setting = ixl_rx_itr;
2933 		/* Update the hardware if needed */
2934 		if (rxr->itr != vsi->rx_itr_setting) {
2935 			rxr->itr = vsi->rx_itr_setting;
2936 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2937 			    que->me), rxr->itr);
2938 		}
2939 	}
2940 	rxr->bytes = 0;
2941 	rxr->packets = 0;
2942 	return;
2943 }
2944 
2945 
2946 /*
2947 ** Provide a update to the queue TX
2948 ** interrupt moderation value.
2949 */
2950 static void
2951 ixl_set_queue_tx_itr(struct ixl_queue *que)
2952 {
2953 	struct ixl_vsi	*vsi = que->vsi;
2954 	struct i40e_hw	*hw = vsi->hw;
2955 	struct tx_ring	*txr = &que->txr;
2956 	u16		tx_itr;
2957 	u16		tx_latency = 0;
2958 	int		tx_bytes;
2959 
2960 
2961 	/* Idle, do nothing */
2962 	if (txr->bytes == 0)
2963 		return;
2964 
2965 	if (ixl_dynamic_tx_itr) {
2966 		tx_bytes = txr->bytes/txr->itr;
2967 		tx_itr = txr->itr;
2968 
2969 		switch (txr->latency) {
2970 		case IXL_LOW_LATENCY:
2971 			if (tx_bytes > 10) {
2972 				tx_latency = IXL_AVE_LATENCY;
2973 				tx_itr = IXL_ITR_20K;
2974 			}
2975 			break;
2976 		case IXL_AVE_LATENCY:
2977 			if (tx_bytes > 20) {
2978 				tx_latency = IXL_BULK_LATENCY;
2979 				tx_itr = IXL_ITR_8K;
2980 			} else if (tx_bytes <= 10) {
2981 				tx_latency = IXL_LOW_LATENCY;
2982 				tx_itr = IXL_ITR_100K;
2983 			}
2984 			break;
2985 		case IXL_BULK_LATENCY:
2986 			if (tx_bytes <= 20) {
2987 				tx_latency = IXL_AVE_LATENCY;
2988 				tx_itr = IXL_ITR_20K;
2989 			}
2990 			break;
2991 		}
2992 
2993 		txr->latency = tx_latency;
2994 
2995 		if (tx_itr != txr->itr) {
2996        	         /* do an exponential smoothing */
2997 			tx_itr = (10 * tx_itr * txr->itr) /
2998 			    ((9 * tx_itr) + txr->itr);
2999 			txr->itr = tx_itr & IXL_MAX_ITR;
3000 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3001 			    que->me), txr->itr);
3002 		}
3003 
3004 	} else { /* We may have have toggled to non-dynamic */
3005 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3006 			vsi->tx_itr_setting = ixl_tx_itr;
3007 		/* Update the hardware if needed */
3008 		if (txr->itr != vsi->tx_itr_setting) {
3009 			txr->itr = vsi->tx_itr_setting;
3010 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3011 			    que->me), txr->itr);
3012 		}
3013 	}
3014 	txr->bytes = 0;
3015 	txr->packets = 0;
3016 	return;
3017 }
3018 
3019 
3020 static void
3021 ixl_add_hw_stats(struct ixl_pf *pf)
3022 {
3023 	device_t dev = pf->dev;
3024 	struct ixl_vsi *vsi = &pf->vsi;
3025 	struct ixl_queue *queues = vsi->queues;
3026 	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3027 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3028 
3029 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3030 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3031 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3032 
3033 	struct sysctl_oid *vsi_node, *queue_node;
3034 	struct sysctl_oid_list *vsi_list, *queue_list;
3035 
3036 	struct tx_ring *txr;
3037 	struct rx_ring *rxr;
3038 
3039 	/* Driver statistics */
3040 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3041 			CTLFLAG_RD, &pf->watchdog_events,
3042 			"Watchdog timeouts");
3043 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3044 			CTLFLAG_RD, &pf->admin_irq,
3045 			"Admin Queue IRQ Handled");
3046 
3047 	/* VSI statistics */
3048 #define QUEUE_NAME_LEN 32
3049 	char queue_namebuf[QUEUE_NAME_LEN];
3050 
3051 	// ERJ: Only one vsi now, re-do when >1 VSI enabled
3052 	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3053 	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3054 				   CTLFLAG_RD, NULL, "VSI-specific stats");
3055 	vsi_list = SYSCTL_CHILDREN(vsi_node);
3056 
3057 	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3058 
3059 	/* Queue statistics */
3060 	for (int q = 0; q < vsi->num_queues; q++) {
3061 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3062 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3063 					     CTLFLAG_RD, NULL, "Queue #");
3064 		queue_list = SYSCTL_CHILDREN(queue_node);
3065 
3066 		txr = &(queues[q].txr);
3067 		rxr = &(queues[q].rxr);
3068 
3069 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3070 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3071 				"m_defrag() failed");
3072 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3073 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3074 				"Driver dropped packets");
3075 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3076 				CTLFLAG_RD, &(queues[q].irqs),
3077 				"irqs on this queue");
3078 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3079 				CTLFLAG_RD, &(queues[q].tso),
3080 				"TSO");
3081 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3082 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3083 				"Driver tx dma failure in xmit");
3084 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3085 				CTLFLAG_RD, &(txr->no_desc),
3086 				"Queue No Descriptor Available");
3087 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3088 				CTLFLAG_RD, &(txr->total_packets),
3089 				"Queue Packets Transmitted");
3090 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3091 				CTLFLAG_RD, &(txr->tx_bytes),
3092 				"Queue Bytes Transmitted");
3093 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3094 				CTLFLAG_RD, &(rxr->rx_packets),
3095 				"Queue Packets Received");
3096 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3097 				CTLFLAG_RD, &(rxr->rx_bytes),
3098 				"Queue Bytes Received");
3099 	}
3100 
3101 	/* MAC stats */
3102 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3103 }
3104 
3105 static void
3106 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3107 	struct sysctl_oid_list *child,
3108 	struct i40e_eth_stats *eth_stats)
3109 {
3110 	struct ixl_sysctl_info ctls[] =
3111 	{
3112 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3113 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3114 			"Unicast Packets Received"},
3115 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3116 			"Multicast Packets Received"},
3117 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3118 			"Broadcast Packets Received"},
3119 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3120 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3121 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3122 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3123 			"Multicast Packets Transmitted"},
3124 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3125 			"Broadcast Packets Transmitted"},
3126 		// end
3127 		{0,0,0}
3128 	};
3129 
3130 	struct ixl_sysctl_info *entry = ctls;
3131 	while (entry->stat != 0)
3132 	{
3133 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3134 				CTLFLAG_RD, entry->stat,
3135 				entry->description);
3136 		entry++;
3137 	}
3138 }
3139 
3140 static void
3141 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3142 	struct sysctl_oid_list *child,
3143 	struct i40e_hw_port_stats *stats)
3144 {
3145 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3146 				    CTLFLAG_RD, NULL, "Mac Statistics");
3147 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3148 
3149 	struct i40e_eth_stats *eth_stats = &stats->eth;
3150 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3151 
3152 	struct ixl_sysctl_info ctls[] =
3153 	{
3154 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3155 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3156 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3157 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3158 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3159 		/* Packet Reception Stats */
3160 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3161 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3162 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3163 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3164 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3165 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3166 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3167 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3168 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3169 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3170 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3171 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3172 		/* Packet Transmission Stats */
3173 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3174 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3175 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3176 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3177 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3178 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3179 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3180 		/* Flow control */
3181 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3182 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3183 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3184 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3185 		/* End */
3186 		{0,0,0}
3187 	};
3188 
3189 	struct ixl_sysctl_info *entry = ctls;
3190 	while (entry->stat != 0)
3191 	{
3192 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3193 				CTLFLAG_RD, entry->stat,
3194 				entry->description);
3195 		entry++;
3196 	}
3197 }
3198 
3199 /*
3200 ** ixl_config_rss - setup RSS
3201 **  - note this is done for the single vsi
3202 */
3203 static void ixl_config_rss(struct ixl_vsi *vsi)
3204 {
3205 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3206 	struct i40e_hw	*hw = vsi->hw;
3207 	u32		lut = 0;
3208 	u64		set_hena = 0, hena;
3209 	int		i, j, que_id;
3210 #ifdef RSS
3211 	u32		rss_hash_config;
3212 	u32		rss_seed[IXL_KEYSZ];
3213 #else
3214 	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3215 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3216 			    0x35897377, 0x328b25e1, 0x4fa98922,
3217 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3218 #endif
3219 
3220 #ifdef RSS
3221         /* Fetch the configured RSS key */
3222         rss_getkey((uint8_t *) &rss_seed);
3223 #endif
3224 
3225 	/* Fill out hash function seed */
3226 	for (i = 0; i < IXL_KEYSZ; i++)
3227                 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3228 
3229 	/* Enable PCTYPES for RSS: */
3230 #ifdef RSS
3231 	rss_hash_config = rss_gethashconfig();
3232 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3233                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3234 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3235                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3236 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3237                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3238 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3239                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3240 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3241 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3242 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3243                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3244         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3245                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3246 #else
3247 	set_hena =
3248 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3249 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3250 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3251 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3252 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3253 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3254 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3255 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3256 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3257 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3258 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3259 #endif
3260 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3261 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3262 	hena |= set_hena;
3263 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3264 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3265 
3266 	/* Populate the LUT with max no. of queues in round robin fashion */
3267 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3268 		if (j == vsi->num_queues)
3269 			j = 0;
3270 #ifdef RSS
3271 		/*
3272 		 * Fetch the RSS bucket id for the given indirection entry.
3273 		 * Cap it at the number of configured buckets (which is
3274 		 * num_queues.)
3275 		 */
3276 		que_id = rss_get_indirection_to_bucket(i);
3277 		que_id = que_id % vsi->num_queues;
3278 #else
3279 		que_id = j;
3280 #endif
3281 		/* lut = 4-byte sliding window of 4 lut entries */
3282 		lut = (lut << 8) | (que_id &
3283 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3284 		/* On i = 3, we have 4 entries in lut; write to the register */
3285 		if ((i & 3) == 3)
3286 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3287 	}
3288 	ixl_flush(hw);
3289 }
3290 
3291 
3292 /*
3293 ** This routine is run via an vlan config EVENT,
3294 ** it enables us to use the HW Filter table since
3295 ** we can get the vlan id. This just creates the
3296 ** entry in the soft version of the VFTA, init will
3297 ** repopulate the real table.
3298 */
3299 static void
3300 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3301 {
3302 	struct ixl_vsi	*vsi = ifp->if_softc;
3303 	struct i40e_hw	*hw = vsi->hw;
3304 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3305 
3306 	if (ifp->if_softc !=  arg)   /* Not our event */
3307 		return;
3308 
3309 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3310 		return;
3311 
3312 	IXL_PF_LOCK(pf);
3313 	++vsi->num_vlans;
3314 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3315 	IXL_PF_UNLOCK(pf);
3316 }
3317 
3318 /*
3319 ** This routine is run via an vlan
3320 ** unconfig EVENT, remove our entry
3321 ** in the soft vfta.
3322 */
3323 static void
3324 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3325 {
3326 	struct ixl_vsi	*vsi = ifp->if_softc;
3327 	struct i40e_hw	*hw = vsi->hw;
3328 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3329 
3330 	if (ifp->if_softc !=  arg)
3331 		return;
3332 
3333 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3334 		return;
3335 
3336 	IXL_PF_LOCK(pf);
3337 	--vsi->num_vlans;
3338 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3339 	IXL_PF_UNLOCK(pf);
3340 }
3341 
3342 /*
3343 ** This routine updates vlan filters, called by init
3344 ** it scans the filter table and then updates the hw
3345 ** after a soft reset.
3346 */
3347 static void
3348 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3349 {
3350 	struct ixl_mac_filter	*f;
3351 	int			cnt = 0, flags;
3352 
3353 	if (vsi->num_vlans == 0)
3354 		return;
3355 	/*
3356 	** Scan the filter list for vlan entries,
3357 	** mark them for addition and then call
3358 	** for the AQ update.
3359 	*/
3360 	SLIST_FOREACH(f, &vsi->ftl, next) {
3361 		if (f->flags & IXL_FILTER_VLAN) {
3362 			f->flags |=
3363 			    (IXL_FILTER_ADD |
3364 			    IXL_FILTER_USED);
3365 			cnt++;
3366 		}
3367 	}
3368 	if (cnt == 0) {
3369 		printf("setup vlan: no filters found!\n");
3370 		return;
3371 	}
3372 	flags = IXL_FILTER_VLAN;
3373 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3374 	ixl_add_hw_filters(vsi, flags, cnt);
3375 	return;
3376 }
3377 
3378 /*
3379 ** Initialize filter list and add filters that the hardware
3380 ** needs to know about.
3381 */
3382 static void
3383 ixl_init_filters(struct ixl_vsi *vsi)
3384 {
3385 	/* Add broadcast address */
3386 	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3387 	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3388 }
3389 
3390 /*
3391 ** This routine adds mulicast filters
3392 */
3393 static void
3394 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3395 {
3396 	struct ixl_mac_filter *f;
3397 
3398 	/* Does one already exist */
3399 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3400 	if (f != NULL)
3401 		return;
3402 
3403 	f = ixl_get_filter(vsi);
3404 	if (f == NULL) {
3405 		printf("WARNING: no filter available!!\n");
3406 		return;
3407 	}
3408 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3409 	f->vlan = IXL_VLAN_ANY;
3410 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3411 	    | IXL_FILTER_MC);
3412 
3413 	return;
3414 }
3415 
3416 /*
3417 ** This routine adds macvlan filters
3418 */
3419 static void
3420 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3421 {
3422 	struct ixl_mac_filter	*f, *tmp;
3423 	device_t		dev = vsi->dev;
3424 
3425 	DEBUGOUT("ixl_add_filter: begin");
3426 
3427 	/* Does one already exist */
3428 	f = ixl_find_filter(vsi, macaddr, vlan);
3429 	if (f != NULL)
3430 		return;
3431 	/*
3432 	** Is this the first vlan being registered, if so we
3433 	** need to remove the ANY filter that indicates we are
3434 	** not in a vlan, and replace that with a 0 filter.
3435 	*/
3436 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3437 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3438 		if (tmp != NULL) {
3439 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3440 			ixl_add_filter(vsi, macaddr, 0);
3441 		}
3442 	}
3443 
3444 	f = ixl_get_filter(vsi);
3445 	if (f == NULL) {
3446 		device_printf(dev, "WARNING: no filter available!!\n");
3447 		return;
3448 	}
3449 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3450 	f->vlan = vlan;
3451 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3452 	if (f->vlan != IXL_VLAN_ANY)
3453 		f->flags |= IXL_FILTER_VLAN;
3454 
3455 	ixl_add_hw_filters(vsi, f->flags, 1);
3456 	return;
3457 }
3458 
3459 static void
3460 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3461 {
3462 	struct ixl_mac_filter *f;
3463 
3464 	f = ixl_find_filter(vsi, macaddr, vlan);
3465 	if (f == NULL)
3466 		return;
3467 
3468 	f->flags |= IXL_FILTER_DEL;
3469 	ixl_del_hw_filters(vsi, 1);
3470 
3471 	/* Check if this is the last vlan removal */
3472 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3473 		/* Switch back to a non-vlan filter */
3474 		ixl_del_filter(vsi, macaddr, 0);
3475 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3476 	}
3477 	return;
3478 }
3479 
3480 /*
3481 ** Find the filter with both matching mac addr and vlan id
3482 */
3483 static struct ixl_mac_filter *
3484 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3485 {
3486 	struct ixl_mac_filter	*f;
3487 	bool			match = FALSE;
3488 
3489 	SLIST_FOREACH(f, &vsi->ftl, next) {
3490 		if (!cmp_etheraddr(f->macaddr, macaddr))
3491 			continue;
3492 		if (f->vlan == vlan) {
3493 			match = TRUE;
3494 			break;
3495 		}
3496 	}
3497 
3498 	if (!match)
3499 		f = NULL;
3500 	return (f);
3501 }
3502 
3503 /*
3504 ** This routine takes additions to the vsi filter
3505 ** table and creates an Admin Queue call to create
3506 ** the filters in the hardware.
3507 */
3508 static void
3509 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3510 {
3511 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3512 	struct ixl_mac_filter	*f;
3513 	struct i40e_hw	*hw = vsi->hw;
3514 	device_t	dev = vsi->dev;
3515 	int		err, j = 0;
3516 
3517 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3518 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3519 	if (a == NULL) {
3520 		device_printf(dev, "add_hw_filters failed to get memory\n");
3521 		return;
3522 	}
3523 
3524 	/*
3525 	** Scan the filter list, each time we find one
3526 	** we add it to the admin queue array and turn off
3527 	** the add bit.
3528 	*/
3529 	SLIST_FOREACH(f, &vsi->ftl, next) {
3530 		if (f->flags == flags) {
3531 			b = &a[j]; // a pox on fvl long names :)
3532 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3533 			b->vlan_tag =
3534 			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3535 			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3536 			f->flags &= ~IXL_FILTER_ADD;
3537 			j++;
3538 		}
3539 		if (j == cnt)
3540 			break;
3541 	}
3542 	if (j > 0) {
3543 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3544 		if (err)
3545 			device_printf(dev, "aq_add_macvlan err %d, "
3546 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3547 		else
3548 			vsi->hw_filters_add += j;
3549 	}
3550 	free(a, M_DEVBUF);
3551 	return;
3552 }
3553 
3554 /*
3555 ** This routine takes removals in the vsi filter
3556 ** table and creates an Admin Queue call to delete
3557 ** the filters in the hardware.
3558 */
3559 static void
3560 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3561 {
3562 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3563 	struct i40e_hw		*hw = vsi->hw;
3564 	device_t		dev = vsi->dev;
3565 	struct ixl_mac_filter	*f, *f_temp;
3566 	int			err, j = 0;
3567 
3568 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3569 
3570 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3571 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3572 	if (d == NULL) {
3573 		printf("del hw filter failed to get memory\n");
3574 		return;
3575 	}
3576 
3577 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3578 		if (f->flags & IXL_FILTER_DEL) {
3579 			e = &d[j]; // a pox on fvl long names :)
3580 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3581 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3582 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3583 			/* delete entry from vsi list */
3584 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3585 			free(f, M_DEVBUF);
3586 			j++;
3587 		}
3588 		if (j == cnt)
3589 			break;
3590 	}
3591 	if (j > 0) {
3592 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3593 		/* NOTE: returns ENOENT every time but seems to work fine,
3594 		   so we'll ignore that specific error. */
3595 		// TODO: Does this still occur on current firmwares?
3596 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3597 			int sc = 0;
3598 			for (int i = 0; i < j; i++)
3599 				sc += (!d[i].error_code);
3600 			vsi->hw_filters_del += sc;
3601 			device_printf(dev,
3602 			    "Failed to remove %d/%d filters, aq error %d\n",
3603 			    j - sc, j, hw->aq.asq_last_status);
3604 		} else
3605 			vsi->hw_filters_del += j;
3606 	}
3607 	free(d, M_DEVBUF);
3608 
3609 	DEBUGOUT("ixl_del_hw_filters: end\n");
3610 	return;
3611 }
3612 
3613 
3614 static void
3615 ixl_enable_rings(struct ixl_vsi *vsi)
3616 {
3617 	struct i40e_hw	*hw = vsi->hw;
3618 	u32		reg;
3619 
3620 	for (int i = 0; i < vsi->num_queues; i++) {
3621 		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3622 
3623 		reg = rd32(hw, I40E_QTX_ENA(i));
3624 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3625 		    I40E_QTX_ENA_QENA_STAT_MASK;
3626 		wr32(hw, I40E_QTX_ENA(i), reg);
3627 		/* Verify the enable took */
3628 		for (int j = 0; j < 10; j++) {
3629 			reg = rd32(hw, I40E_QTX_ENA(i));
3630 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3631 				break;
3632 			i40e_msec_delay(10);
3633 		}
3634 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3635 			printf("TX queue %d disabled!\n", i);
3636 
3637 		reg = rd32(hw, I40E_QRX_ENA(i));
3638 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3639 		    I40E_QRX_ENA_QENA_STAT_MASK;
3640 		wr32(hw, I40E_QRX_ENA(i), reg);
3641 		/* Verify the enable took */
3642 		for (int j = 0; j < 10; j++) {
3643 			reg = rd32(hw, I40E_QRX_ENA(i));
3644 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3645 				break;
3646 			i40e_msec_delay(10);
3647 		}
3648 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3649 			printf("RX queue %d disabled!\n", i);
3650 	}
3651 }
3652 
3653 static void
3654 ixl_disable_rings(struct ixl_vsi *vsi)
3655 {
3656 	struct i40e_hw	*hw = vsi->hw;
3657 	u32		reg;
3658 
3659 	for (int i = 0; i < vsi->num_queues; i++) {
3660 		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3661 		i40e_usec_delay(500);
3662 
3663 		reg = rd32(hw, I40E_QTX_ENA(i));
3664 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3665 		wr32(hw, I40E_QTX_ENA(i), reg);
3666 		/* Verify the disable took */
3667 		for (int j = 0; j < 10; j++) {
3668 			reg = rd32(hw, I40E_QTX_ENA(i));
3669 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3670 				break;
3671 			i40e_msec_delay(10);
3672 		}
3673 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3674 			printf("TX queue %d still enabled!\n", i);
3675 
3676 		reg = rd32(hw, I40E_QRX_ENA(i));
3677 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3678 		wr32(hw, I40E_QRX_ENA(i), reg);
3679 		/* Verify the disable took */
3680 		for (int j = 0; j < 10; j++) {
3681 			reg = rd32(hw, I40E_QRX_ENA(i));
3682 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3683 				break;
3684 			i40e_msec_delay(10);
3685 		}
3686 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3687 			printf("RX queue %d still enabled!\n", i);
3688 	}
3689 }
3690 
3691 /**
3692  * ixl_handle_mdd_event
3693  *
3694  * Called from interrupt handler to identify possibly malicious vfs
3695  * (But also detects events from the PF, as well)
3696  **/
3697 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3698 {
3699 	struct i40e_hw *hw = &pf->hw;
3700 	device_t dev = pf->dev;
3701 	bool mdd_detected = false;
3702 	bool pf_mdd_detected = false;
3703 	u32 reg;
3704 
3705 	/* find what triggered the MDD event */
3706 	reg = rd32(hw, I40E_GL_MDET_TX);
3707 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3708 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3709 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3710 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3711 				I40E_GL_MDET_TX_EVENT_SHIFT;
3712 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3713 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3714 		device_printf(dev,
3715 			 "Malicious Driver Detection event 0x%02x"
3716 			 " on TX queue %d pf number 0x%02x\n",
3717 			 event, queue, pf_num);
3718 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3719 		mdd_detected = true;
3720 	}
3721 	reg = rd32(hw, I40E_GL_MDET_RX);
3722 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3723 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3724 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3725 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3726 				I40E_GL_MDET_RX_EVENT_SHIFT;
3727 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3728 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3729 		device_printf(dev,
3730 			 "Malicious Driver Detection event 0x%02x"
3731 			 " on RX queue %d of function 0x%02x\n",
3732 			 event, queue, func);
3733 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3734 		mdd_detected = true;
3735 	}
3736 
3737 	if (mdd_detected) {
3738 		reg = rd32(hw, I40E_PF_MDET_TX);
3739 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3740 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3741 			device_printf(dev,
3742 				 "MDD TX event is for this function 0x%08x",
3743 				 reg);
3744 			pf_mdd_detected = true;
3745 		}
3746 		reg = rd32(hw, I40E_PF_MDET_RX);
3747 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3748 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3749 			device_printf(dev,
3750 				 "MDD RX event is for this function 0x%08x",
3751 				 reg);
3752 			pf_mdd_detected = true;
3753 		}
3754 	}
3755 
3756 	/* re-enable mdd interrupt cause */
3757 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3758 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3759 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3760 	ixl_flush(hw);
3761 }
3762 
3763 static void
3764 ixl_enable_intr(struct ixl_vsi *vsi)
3765 {
3766 	struct i40e_hw		*hw = vsi->hw;
3767 	struct ixl_queue	*que = vsi->queues;
3768 
3769 	if (ixl_enable_msix) {
3770 		ixl_enable_adminq(hw);
3771 		for (int i = 0; i < vsi->num_queues; i++, que++)
3772 			ixl_enable_queue(hw, que->me);
3773 	} else
3774 		ixl_enable_legacy(hw);
3775 }
3776 
3777 static void
3778 ixl_disable_intr(struct ixl_vsi *vsi)
3779 {
3780 	struct i40e_hw		*hw = vsi->hw;
3781 	struct ixl_queue	*que = vsi->queues;
3782 
3783 	if (ixl_enable_msix) {
3784 		ixl_disable_adminq(hw);
3785 		for (int i = 0; i < vsi->num_queues; i++, que++)
3786 			ixl_disable_queue(hw, que->me);
3787 	} else
3788 		ixl_disable_legacy(hw);
3789 }
3790 
3791 static void
3792 ixl_enable_adminq(struct i40e_hw *hw)
3793 {
3794 	u32		reg;
3795 
3796 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3797 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3798 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3799 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3800 	ixl_flush(hw);
3801 	return;
3802 }
3803 
3804 static void
3805 ixl_disable_adminq(struct i40e_hw *hw)
3806 {
3807 	u32		reg;
3808 
3809 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3810 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3811 
3812 	return;
3813 }
3814 
3815 static void
3816 ixl_enable_queue(struct i40e_hw *hw, int id)
3817 {
3818 	u32		reg;
3819 
3820 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3821 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3822 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3823 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3824 }
3825 
3826 static void
3827 ixl_disable_queue(struct i40e_hw *hw, int id)
3828 {
3829 	u32		reg;
3830 
3831 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3832 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3833 
3834 	return;
3835 }
3836 
3837 static void
3838 ixl_enable_legacy(struct i40e_hw *hw)
3839 {
3840 	u32		reg;
3841 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3842 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3843 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3844 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3845 }
3846 
3847 static void
3848 ixl_disable_legacy(struct i40e_hw *hw)
3849 {
3850 	u32		reg;
3851 
3852 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3853 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3854 
3855 	return;
3856 }
3857 
3858 static void
3859 ixl_update_stats_counters(struct ixl_pf *pf)
3860 {
3861 	struct i40e_hw	*hw = &pf->hw;
3862 	struct ixl_vsi *vsi = &pf->vsi;
3863 
3864 	struct i40e_hw_port_stats *nsd = &pf->stats;
3865 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3866 
3867 	/* Update hw stats */
3868 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3869 			   pf->stat_offsets_loaded,
3870 			   &osd->crc_errors, &nsd->crc_errors);
3871 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3872 			   pf->stat_offsets_loaded,
3873 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3874 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3875 			   I40E_GLPRT_GORCL(hw->port),
3876 			   pf->stat_offsets_loaded,
3877 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3878 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3879 			   I40E_GLPRT_GOTCL(hw->port),
3880 			   pf->stat_offsets_loaded,
3881 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3882 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3883 			   pf->stat_offsets_loaded,
3884 			   &osd->eth.rx_discards,
3885 			   &nsd->eth.rx_discards);
3886 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3887 			   I40E_GLPRT_UPRCL(hw->port),
3888 			   pf->stat_offsets_loaded,
3889 			   &osd->eth.rx_unicast,
3890 			   &nsd->eth.rx_unicast);
3891 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3892 			   I40E_GLPRT_UPTCL(hw->port),
3893 			   pf->stat_offsets_loaded,
3894 			   &osd->eth.tx_unicast,
3895 			   &nsd->eth.tx_unicast);
3896 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3897 			   I40E_GLPRT_MPRCL(hw->port),
3898 			   pf->stat_offsets_loaded,
3899 			   &osd->eth.rx_multicast,
3900 			   &nsd->eth.rx_multicast);
3901 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3902 			   I40E_GLPRT_MPTCL(hw->port),
3903 			   pf->stat_offsets_loaded,
3904 			   &osd->eth.tx_multicast,
3905 			   &nsd->eth.tx_multicast);
3906 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3907 			   I40E_GLPRT_BPRCL(hw->port),
3908 			   pf->stat_offsets_loaded,
3909 			   &osd->eth.rx_broadcast,
3910 			   &nsd->eth.rx_broadcast);
3911 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3912 			   I40E_GLPRT_BPTCL(hw->port),
3913 			   pf->stat_offsets_loaded,
3914 			   &osd->eth.tx_broadcast,
3915 			   &nsd->eth.tx_broadcast);
3916 
3917 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3918 			   pf->stat_offsets_loaded,
3919 			   &osd->tx_dropped_link_down,
3920 			   &nsd->tx_dropped_link_down);
3921 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3922 			   pf->stat_offsets_loaded,
3923 			   &osd->mac_local_faults,
3924 			   &nsd->mac_local_faults);
3925 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3926 			   pf->stat_offsets_loaded,
3927 			   &osd->mac_remote_faults,
3928 			   &nsd->mac_remote_faults);
3929 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3930 			   pf->stat_offsets_loaded,
3931 			   &osd->rx_length_errors,
3932 			   &nsd->rx_length_errors);
3933 
3934 	/* Flow control (LFC) stats */
3935 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3936 			   pf->stat_offsets_loaded,
3937 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3938 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3939 			   pf->stat_offsets_loaded,
3940 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3941 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3942 			   pf->stat_offsets_loaded,
3943 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3944 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3945 			   pf->stat_offsets_loaded,
3946 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3947 
3948 	/* Packet size stats rx */
3949 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3950 			   I40E_GLPRT_PRC64L(hw->port),
3951 			   pf->stat_offsets_loaded,
3952 			   &osd->rx_size_64, &nsd->rx_size_64);
3953 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3954 			   I40E_GLPRT_PRC127L(hw->port),
3955 			   pf->stat_offsets_loaded,
3956 			   &osd->rx_size_127, &nsd->rx_size_127);
3957 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3958 			   I40E_GLPRT_PRC255L(hw->port),
3959 			   pf->stat_offsets_loaded,
3960 			   &osd->rx_size_255, &nsd->rx_size_255);
3961 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3962 			   I40E_GLPRT_PRC511L(hw->port),
3963 			   pf->stat_offsets_loaded,
3964 			   &osd->rx_size_511, &nsd->rx_size_511);
3965 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3966 			   I40E_GLPRT_PRC1023L(hw->port),
3967 			   pf->stat_offsets_loaded,
3968 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3969 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3970 			   I40E_GLPRT_PRC1522L(hw->port),
3971 			   pf->stat_offsets_loaded,
3972 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3973 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3974 			   I40E_GLPRT_PRC9522L(hw->port),
3975 			   pf->stat_offsets_loaded,
3976 			   &osd->rx_size_big, &nsd->rx_size_big);
3977 
3978 	/* Packet size stats tx */
3979 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3980 			   I40E_GLPRT_PTC64L(hw->port),
3981 			   pf->stat_offsets_loaded,
3982 			   &osd->tx_size_64, &nsd->tx_size_64);
3983 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3984 			   I40E_GLPRT_PTC127L(hw->port),
3985 			   pf->stat_offsets_loaded,
3986 			   &osd->tx_size_127, &nsd->tx_size_127);
3987 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3988 			   I40E_GLPRT_PTC255L(hw->port),
3989 			   pf->stat_offsets_loaded,
3990 			   &osd->tx_size_255, &nsd->tx_size_255);
3991 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3992 			   I40E_GLPRT_PTC511L(hw->port),
3993 			   pf->stat_offsets_loaded,
3994 			   &osd->tx_size_511, &nsd->tx_size_511);
3995 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3996 			   I40E_GLPRT_PTC1023L(hw->port),
3997 			   pf->stat_offsets_loaded,
3998 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3999 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4000 			   I40E_GLPRT_PTC1522L(hw->port),
4001 			   pf->stat_offsets_loaded,
4002 			   &osd->tx_size_1522, &nsd->tx_size_1522);
4003 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4004 			   I40E_GLPRT_PTC9522L(hw->port),
4005 			   pf->stat_offsets_loaded,
4006 			   &osd->tx_size_big, &nsd->tx_size_big);
4007 
4008 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4009 			   pf->stat_offsets_loaded,
4010 			   &osd->rx_undersize, &nsd->rx_undersize);
4011 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4012 			   pf->stat_offsets_loaded,
4013 			   &osd->rx_fragments, &nsd->rx_fragments);
4014 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4015 			   pf->stat_offsets_loaded,
4016 			   &osd->rx_oversize, &nsd->rx_oversize);
4017 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4018 			   pf->stat_offsets_loaded,
4019 			   &osd->rx_jabber, &nsd->rx_jabber);
4020 	pf->stat_offsets_loaded = true;
4021 	/* End hw stats */
4022 
4023 	/* Update vsi stats */
4024 	ixl_update_eth_stats(vsi);
4025 
4026 	/* OS statistics */
4027 	// ERJ - these are per-port, update all vsis?
4028 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4029 }
4030 
4031 /*
4032 ** Tasklet handler for MSIX Adminq interrupts
4033 **  - do outside interrupt since it might sleep
4034 */
4035 static void
4036 ixl_do_adminq(void *context, int pending)
4037 {
4038 	struct ixl_pf			*pf = context;
4039 	struct i40e_hw			*hw = &pf->hw;
4040 	struct ixl_vsi			*vsi = &pf->vsi;
4041 	struct i40e_arq_event_info	event;
4042 	i40e_status			ret;
4043 	u32				reg, loop = 0;
4044 	u16				opcode, result;
4045 
4046 	event.buf_len = IXL_AQ_BUF_SZ;
4047 	event.msg_buf = malloc(event.buf_len,
4048 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4049 	if (!event.msg_buf) {
4050 		printf("Unable to allocate adminq memory\n");
4051 		return;
4052 	}
4053 
4054 	/* clean and process any events */
4055 	do {
4056 		ret = i40e_clean_arq_element(hw, &event, &result);
4057 		if (ret)
4058 			break;
4059 		opcode = LE16_TO_CPU(event.desc.opcode);
4060 		switch (opcode) {
4061 		case i40e_aqc_opc_get_link_status:
4062 			vsi->link_up = ixl_config_link(hw);
4063 			ixl_update_link_status(pf);
4064 			break;
4065 		case i40e_aqc_opc_send_msg_to_pf:
4066 			/* process pf/vf communication here */
4067 			break;
4068 		case i40e_aqc_opc_event_lan_overflow:
4069 			break;
4070 		default:
4071 #ifdef IXL_DEBUG
4072 			printf("AdminQ unknown event %x\n", opcode);
4073 #endif
4074 			break;
4075 		}
4076 
4077 	} while (result && (loop++ < IXL_ADM_LIMIT));
4078 
4079 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4080 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4081 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4082 	free(event.msg_buf, M_DEVBUF);
4083 
4084 	if (pf->msix > 1)
4085 		ixl_enable_adminq(&pf->hw);
4086 	else
4087 		ixl_enable_intr(vsi);
4088 }
4089 
4090 static int
4091 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4092 {
4093 	struct ixl_pf	*pf;
4094 	int		error, input = 0;
4095 
4096 	error = sysctl_handle_int(oidp, &input, 0, req);
4097 
4098 	if (error || !req->newptr)
4099 		return (error);
4100 
4101 	if (input == 1) {
4102 		pf = (struct ixl_pf *)arg1;
4103 		ixl_print_debug_info(pf);
4104 	}
4105 
4106 	return (error);
4107 }
4108 
4109 static void
4110 ixl_print_debug_info(struct ixl_pf *pf)
4111 {
4112 	struct i40e_hw		*hw = &pf->hw;
4113 	struct ixl_vsi		*vsi = &pf->vsi;
4114 	struct ixl_queue	*que = vsi->queues;
4115 	struct rx_ring		*rxr = &que->rxr;
4116 	struct tx_ring		*txr = &que->txr;
4117 	u32			reg;
4118 
4119 
4120 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4121 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4122 	printf("RX next check = %x\n", rxr->next_check);
4123 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4124 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4125 	printf("TX desc avail = %x\n", txr->avail);
4126 
4127 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4128 	 printf("RX Bytes = %x\n", reg);
4129 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4130 	 printf("Port RX Bytes = %x\n", reg);
4131 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4132 	 printf("RX discard = %x\n", reg);
4133 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4134 	 printf("Port RX discard = %x\n", reg);
4135 
4136 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4137 	 printf("TX errors = %x\n", reg);
4138 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4139 	 printf("TX Bytes = %x\n", reg);
4140 
4141 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4142 	 printf("RX undersize = %x\n", reg);
4143 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4144 	 printf("RX fragments = %x\n", reg);
4145 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4146 	 printf("RX oversize = %x\n", reg);
4147 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4148 	 printf("RX length error = %x\n", reg);
4149 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4150 	 printf("mac remote fault = %x\n", reg);
4151 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4152 	 printf("mac local fault = %x\n", reg);
4153 }
4154 
4155 /**
4156  * Update VSI-specific ethernet statistics counters.
4157  **/
4158 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4159 {
4160 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4161 	struct i40e_hw *hw = &pf->hw;
4162 	struct i40e_eth_stats *es;
4163 	struct i40e_eth_stats *oes;
4164 	int i;
4165 	uint64_t tx_discards;
4166 	struct i40e_hw_port_stats *nsd;
4167 	u16 stat_idx = vsi->info.stat_counter_idx;
4168 
4169 	es = &vsi->eth_stats;
4170 	oes = &vsi->eth_stats_offsets;
4171 	nsd = &pf->stats;
4172 
4173 	/* Gather up the stats that the hw collects */
4174 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4175 			   vsi->stat_offsets_loaded,
4176 			   &oes->tx_errors, &es->tx_errors);
4177 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4178 			   vsi->stat_offsets_loaded,
4179 			   &oes->rx_discards, &es->rx_discards);
4180 
4181 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4182 			   I40E_GLV_GORCL(stat_idx),
4183 			   vsi->stat_offsets_loaded,
4184 			   &oes->rx_bytes, &es->rx_bytes);
4185 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4186 			   I40E_GLV_UPRCL(stat_idx),
4187 			   vsi->stat_offsets_loaded,
4188 			   &oes->rx_unicast, &es->rx_unicast);
4189 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4190 			   I40E_GLV_MPRCL(stat_idx),
4191 			   vsi->stat_offsets_loaded,
4192 			   &oes->rx_multicast, &es->rx_multicast);
4193 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4194 			   I40E_GLV_BPRCL(stat_idx),
4195 			   vsi->stat_offsets_loaded,
4196 			   &oes->rx_broadcast, &es->rx_broadcast);
4197 
4198 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4199 			   I40E_GLV_GOTCL(stat_idx),
4200 			   vsi->stat_offsets_loaded,
4201 			   &oes->tx_bytes, &es->tx_bytes);
4202 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4203 			   I40E_GLV_UPTCL(stat_idx),
4204 			   vsi->stat_offsets_loaded,
4205 			   &oes->tx_unicast, &es->tx_unicast);
4206 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4207 			   I40E_GLV_MPTCL(stat_idx),
4208 			   vsi->stat_offsets_loaded,
4209 			   &oes->tx_multicast, &es->tx_multicast);
4210 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4211 			   I40E_GLV_BPTCL(stat_idx),
4212 			   vsi->stat_offsets_loaded,
4213 			   &oes->tx_broadcast, &es->tx_broadcast);
4214 	vsi->stat_offsets_loaded = true;
4215 
4216 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4217 	for (i = 0; i < vsi->num_queues; i++)
4218 		tx_discards += vsi->queues[i].txr.br->br_drops;
4219 
4220 	/* Update ifnet stats */
4221 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4222 	                   es->rx_multicast +
4223 			   es->rx_broadcast);
4224 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4225 	                   es->tx_multicast +
4226 			   es->tx_broadcast);
4227 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4228 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4229 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4230 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4231 
4232 	IXL_SET_OERRORS(vsi, es->tx_errors);
4233 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4234 	IXL_SET_OQDROPS(vsi, tx_discards);
4235 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4236 	IXL_SET_COLLISIONS(vsi, 0);
4237 }
4238 
4239 /**
4240  * Reset all of the stats for the given pf
4241  **/
4242 void ixl_pf_reset_stats(struct ixl_pf *pf)
4243 {
4244 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4245 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4246 	pf->stat_offsets_loaded = false;
4247 }
4248 
4249 /**
4250  * Resets all stats of the given vsi
4251  **/
4252 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4253 {
4254 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4255 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4256 	vsi->stat_offsets_loaded = false;
4257 }
4258 
4259 /**
4260  * Read and update a 48 bit stat from the hw
4261  *
4262  * Since the device stats are not reset at PFReset, they likely will not
4263  * be zeroed when the driver starts.  We'll save the first values read
4264  * and use them as offsets to be subtracted from the raw values in order
4265  * to report stats that count from zero.
4266  **/
4267 static void
4268 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4269 	bool offset_loaded, u64 *offset, u64 *stat)
4270 {
4271 	u64 new_data;
4272 
4273 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4274 	new_data = rd64(hw, loreg);
4275 #else
4276 	/*
4277 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4278 	 * 10 don't support 8 byte bus reads/writes.
4279 	 */
4280 	new_data = rd32(hw, loreg);
4281 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4282 #endif
4283 
4284 	if (!offset_loaded)
4285 		*offset = new_data;
4286 	if (new_data >= *offset)
4287 		*stat = new_data - *offset;
4288 	else
4289 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4290 	*stat &= 0xFFFFFFFFFFFFULL;
4291 }
4292 
4293 /**
4294  * Read and update a 32 bit stat from the hw
4295  **/
4296 static void
4297 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4298 	bool offset_loaded, u64 *offset, u64 *stat)
4299 {
4300 	u32 new_data;
4301 
4302 	new_data = rd32(hw, reg);
4303 	if (!offset_loaded)
4304 		*offset = new_data;
4305 	if (new_data >= *offset)
4306 		*stat = (u32)(new_data - *offset);
4307 	else
4308 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4309 }
4310 
4311 /*
4312 ** Set flow control using sysctl:
4313 ** 	0 - off
4314 **	1 - rx pause
4315 **	2 - tx pause
4316 **	3 - full
4317 */
4318 static int
4319 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4320 {
4321 	/*
4322 	 * TODO: ensure flow control is disabled if
4323 	 * priority flow control is enabled
4324 	 *
4325 	 * TODO: ensure tx CRC by hardware should be enabled
4326 	 * if tx flow control is enabled.
4327 	 */
4328 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4329 	struct i40e_hw *hw = &pf->hw;
4330 	device_t dev = pf->dev;
4331 	int error = 0;
4332 	enum i40e_status_code aq_error = 0;
4333 	u8 fc_aq_err = 0;
4334 
4335 	/* Get request */
4336 	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4337 	if ((error) || (req->newptr == NULL))
4338 		return (error);
4339 	if (pf->fc < 0 || pf->fc > 3) {
4340 		device_printf(dev,
4341 		    "Invalid fc mode; valid modes are 0 through 3\n");
4342 		return (EINVAL);
4343 	}
4344 
4345 	/*
4346 	** Changing flow control mode currently does not work on
4347 	** 40GBASE-CR4 PHYs
4348 	*/
4349 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4350 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4351 		device_printf(dev, "Changing flow control mode unsupported"
4352 		    " on 40GBase-CR4 media.\n");
4353 		return (ENODEV);
4354 	}
4355 
4356 	/* Set fc ability for port */
4357 	hw->fc.requested_mode = pf->fc;
4358 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4359 	if (aq_error) {
4360 		device_printf(dev,
4361 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4362 		    __func__, aq_error, fc_aq_err);
4363 		return (EAGAIN);
4364 	}
4365 
4366 	return (0);
4367 }
4368 
4369 static int
4370 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4371 {
4372 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4373 	struct i40e_hw *hw = &pf->hw;
4374 	int error = 0, index = 0;
4375 
4376 	char *speeds[] = {
4377 		"Unknown",
4378 		"100M",
4379 		"1G",
4380 		"10G",
4381 		"40G",
4382 		"20G"
4383 	};
4384 
4385 	ixl_update_link_status(pf);
4386 
4387 	switch (hw->phy.link_info.link_speed) {
4388 	case I40E_LINK_SPEED_100MB:
4389 		index = 1;
4390 		break;
4391 	case I40E_LINK_SPEED_1GB:
4392 		index = 2;
4393 		break;
4394 	case I40E_LINK_SPEED_10GB:
4395 		index = 3;
4396 		break;
4397 	case I40E_LINK_SPEED_40GB:
4398 		index = 4;
4399 		break;
4400 	case I40E_LINK_SPEED_20GB:
4401 		index = 5;
4402 		break;
4403 	case I40E_LINK_SPEED_UNKNOWN:
4404 	default:
4405 		index = 0;
4406 		break;
4407 	}
4408 
4409 	error = sysctl_handle_string(oidp, speeds[index],
4410 	    strlen(speeds[index]), req);
4411 	return (error);
4412 }
4413 
4414 static int
4415 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4416 {
4417 	struct i40e_hw *hw = &pf->hw;
4418 	device_t dev = pf->dev;
4419 	struct i40e_aq_get_phy_abilities_resp abilities;
4420 	struct i40e_aq_set_phy_config config;
4421 	enum i40e_status_code aq_error = 0;
4422 
4423 	/* Get current capability information */
4424 	aq_error = i40e_aq_get_phy_capabilities(hw,
4425 	    FALSE, FALSE, &abilities, NULL);
4426 	if (aq_error) {
4427 		device_printf(dev,
4428 		    "%s: Error getting phy capabilities %d,"
4429 		    " aq error: %d\n", __func__, aq_error,
4430 		    hw->aq.asq_last_status);
4431 		return (EAGAIN);
4432 	}
4433 
4434 	/* Prepare new config */
4435 	bzero(&config, sizeof(config));
4436 	config.phy_type = abilities.phy_type;
4437 	config.abilities = abilities.abilities
4438 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4439 	config.eee_capability = abilities.eee_capability;
4440 	config.eeer = abilities.eeer_val;
4441 	config.low_power_ctrl = abilities.d3_lpan;
4442 	/* Translate into aq cmd link_speed */
4443 	if (speeds & 0x4)
4444 		config.link_speed |= I40E_LINK_SPEED_10GB;
4445 	if (speeds & 0x2)
4446 		config.link_speed |= I40E_LINK_SPEED_1GB;
4447 	if (speeds & 0x1)
4448 		config.link_speed |= I40E_LINK_SPEED_100MB;
4449 
4450 	/* Do aq command & restart link */
4451 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4452 	if (aq_error) {
4453 		device_printf(dev,
4454 		    "%s: Error setting new phy config %d,"
4455 		    " aq error: %d\n", __func__, aq_error,
4456 		    hw->aq.asq_last_status);
4457 		return (EAGAIN);
4458 	}
4459 
4460 	/*
4461 	** This seems a bit heavy handed, but we
4462 	** need to get a reinit on some devices
4463 	*/
4464 	IXL_PF_LOCK(pf);
4465 	ixl_stop(pf);
4466 	ixl_init_locked(pf);
4467 	IXL_PF_UNLOCK(pf);
4468 
4469 	return (0);
4470 }
4471 
4472 /*
4473 ** Control link advertise speed:
4474 **	Flags:
4475 **	0x1 - advertise 100 Mb
4476 **	0x2 - advertise 1G
4477 **	0x4 - advertise 10G
4478 **
4479 ** Does not work on 40G devices.
4480 */
4481 static int
4482 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4483 {
4484 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4485 	struct i40e_hw *hw = &pf->hw;
4486 	device_t dev = pf->dev;
4487 	int requested_ls = 0;
4488 	int error = 0;
4489 
4490 	/*
4491 	** FW doesn't support changing advertised speed
4492 	** for 40G devices; speed is always 40G.
4493 	*/
4494 	if (i40e_is_40G_device(hw->device_id))
4495 		return (ENODEV);
4496 
4497 	/* Read in new mode */
4498 	requested_ls = pf->advertised_speed;
4499 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4500 	if ((error) || (req->newptr == NULL))
4501 		return (error);
4502 	if (requested_ls < 1 || requested_ls > 7) {
4503 		device_printf(dev,
4504 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4505 		return (EINVAL);
4506 	}
4507 
4508 	/* Exit if no change */
4509 	if (pf->advertised_speed == requested_ls)
4510 		return (0);
4511 
4512 	error = ixl_set_advertised_speeds(pf, requested_ls);
4513 	if (error)
4514 		return (error);
4515 
4516 	pf->advertised_speed = requested_ls;
4517 	ixl_update_link_status(pf);
4518 	return (0);
4519 }
4520 
4521 /*
4522 ** Get the width and transaction speed of
4523 ** the bus this adapter is plugged into.
4524 */
4525 static u16
4526 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4527 {
4528         u16                     link;
4529         u32                     offset;
4530 
4531 
4532         /* Get the PCI Express Capabilities offset */
4533         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4534 
4535         /* ...and read the Link Status Register */
4536         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4537 
4538         switch (link & I40E_PCI_LINK_WIDTH) {
4539         case I40E_PCI_LINK_WIDTH_1:
4540                 hw->bus.width = i40e_bus_width_pcie_x1;
4541                 break;
4542         case I40E_PCI_LINK_WIDTH_2:
4543                 hw->bus.width = i40e_bus_width_pcie_x2;
4544                 break;
4545         case I40E_PCI_LINK_WIDTH_4:
4546                 hw->bus.width = i40e_bus_width_pcie_x4;
4547                 break;
4548         case I40E_PCI_LINK_WIDTH_8:
4549                 hw->bus.width = i40e_bus_width_pcie_x8;
4550                 break;
4551         default:
4552                 hw->bus.width = i40e_bus_width_unknown;
4553                 break;
4554         }
4555 
4556         switch (link & I40E_PCI_LINK_SPEED) {
4557         case I40E_PCI_LINK_SPEED_2500:
4558                 hw->bus.speed = i40e_bus_speed_2500;
4559                 break;
4560         case I40E_PCI_LINK_SPEED_5000:
4561                 hw->bus.speed = i40e_bus_speed_5000;
4562                 break;
4563         case I40E_PCI_LINK_SPEED_8000:
4564                 hw->bus.speed = i40e_bus_speed_8000;
4565                 break;
4566         default:
4567                 hw->bus.speed = i40e_bus_speed_unknown;
4568                 break;
4569         }
4570 
4571 
4572         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4573             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4574             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4575             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4576             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4577             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4578             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4579             ("Unknown"));
4580 
4581         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4582             (hw->bus.speed < i40e_bus_speed_8000)) {
4583                 device_printf(dev, "PCI-Express bandwidth available"
4584                     " for this device\n     is not sufficient for"
4585                     " normal operation.\n");
4586                 device_printf(dev, "For expected performance a x8 "
4587                     "PCIE Gen3 slot is required.\n");
4588         }
4589 
4590         return (link);
4591 }
4592 
4593 static int
4594 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4595 {
4596 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4597 	struct i40e_hw	*hw = &pf->hw;
4598 	char		buf[32];
4599 
4600 	snprintf(buf, sizeof(buf),
4601 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4602 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4603 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4604 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4605 	    IXL_NVM_VERSION_HI_SHIFT,
4606 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4607 	    IXL_NVM_VERSION_LO_SHIFT,
4608 	    hw->nvm.eetrack);
4609 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4610 }
4611 
4612 
4613 #ifdef IXL_DEBUG_SYSCTL
4614 static int
4615 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4616 {
4617 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4618 	struct i40e_hw *hw = &pf->hw;
4619 	struct i40e_link_status link_status;
4620 	char buf[512];
4621 
4622 	enum i40e_status_code aq_error = 0;
4623 
4624 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4625 	if (aq_error) {
4626 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4627 		return (EPERM);
4628 	}
4629 
4630 	sprintf(buf, "\n"
4631 	    "PHY Type : %#04x\n"
4632 	    "Speed    : %#04x\n"
4633 	    "Link info: %#04x\n"
4634 	    "AN info  : %#04x\n"
4635 	    "Ext info : %#04x",
4636 	    link_status.phy_type, link_status.link_speed,
4637 	    link_status.link_info, link_status.an_info,
4638 	    link_status.ext_info);
4639 
4640 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4641 }
4642 
4643 static int
4644 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4645 {
4646 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4647 	struct i40e_hw *hw = &pf->hw;
4648 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4649 	char buf[512];
4650 
4651 	enum i40e_status_code aq_error = 0;
4652 
4653 	// TODO: Print out list of qualified modules as well?
4654 	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4655 	if (aq_error) {
4656 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4657 		return (EPERM);
4658 	}
4659 
4660 	sprintf(buf, "\n"
4661 	    "PHY Type : %#010x\n"
4662 	    "Speed    : %#04x\n"
4663 	    "Abilities: %#04x\n"
4664 	    "EEE cap  : %#06x\n"
4665 	    "EEER reg : %#010x\n"
4666 	    "D3 Lpan  : %#04x",
4667 	    abilities_resp.phy_type, abilities_resp.link_speed,
4668 	    abilities_resp.abilities, abilities_resp.eee_capability,
4669 	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4670 
4671 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4672 }
4673 
4674 static int
4675 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4676 {
4677 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4678 	struct ixl_vsi *vsi = &pf->vsi;
4679 	struct ixl_mac_filter *f;
4680 	char *buf, *buf_i;
4681 
4682 	int error = 0;
4683 	int ftl_len = 0;
4684 	int ftl_counter = 0;
4685 	int buf_len = 0;
4686 	int entry_len = 42;
4687 
4688 	SLIST_FOREACH(f, &vsi->ftl, next) {
4689 		ftl_len++;
4690 	}
4691 
4692 	if (ftl_len < 1) {
4693 		sysctl_handle_string(oidp, "(none)", 6, req);
4694 		return (0);
4695 	}
4696 
4697 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4698 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4699 
4700 	sprintf(buf_i++, "\n");
4701 	SLIST_FOREACH(f, &vsi->ftl, next) {
4702 		sprintf(buf_i,
4703 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4704 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4705 		buf_i += entry_len;
4706 		/* don't print '\n' for last entry */
4707 		if (++ftl_counter != ftl_len) {
4708 			sprintf(buf_i, "\n");
4709 			buf_i++;
4710 		}
4711 	}
4712 
4713 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4714 	if (error)
4715 		printf("sysctl error: %d\n", error);
4716 	free(buf, M_DEVBUF);
4717 	return error;
4718 }
4719 
4720 #define IXL_SW_RES_SIZE 0x14
4721 static int
4722 ixl_res_alloc_cmp(const void *a, const void *b)
4723 {
4724 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4725 	one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4726 	two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4727 
4728 	return ((int)one->resource_type - (int)two->resource_type);
4729 }
4730 
4731 static int
4732 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4733 {
4734 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4735 	struct i40e_hw *hw = &pf->hw;
4736 	device_t dev = pf->dev;
4737 	struct sbuf *buf;
4738 	int error = 0;
4739 
4740 	u8 num_entries;
4741 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4742 
4743 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4744 	if (!buf) {
4745 		device_printf(dev, "Could not allocate sbuf for output.\n");
4746 		return (ENOMEM);
4747 	}
4748 
4749 	bzero(resp, sizeof(resp));
4750 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4751 				resp,
4752 				IXL_SW_RES_SIZE,
4753 				NULL);
4754 	if (error) {
4755 		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4756 		    __func__, error, hw->aq.asq_last_status);
4757 		sbuf_delete(buf);
4758 		return error;
4759 	}
4760 
4761 	/* Sort entries by type for display */
4762 	qsort(resp, num_entries,
4763 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4764 	    &ixl_res_alloc_cmp);
4765 
4766 	sbuf_cat(buf, "\n");
4767 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4768 	sbuf_printf(buf,
4769 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4770 	    "     | (this)     | (all) | (this) | (all)       \n");
4771 	for (int i = 0; i < num_entries; i++) {
4772 		sbuf_printf(buf,
4773 		    "%#4x | %10d   %5d   %6d   %12d",
4774 		    resp[i].resource_type,
4775 		    resp[i].guaranteed,
4776 		    resp[i].total,
4777 		    resp[i].used,
4778 		    resp[i].total_unalloced);
4779 		if (i < num_entries - 1)
4780 			sbuf_cat(buf, "\n");
4781 	}
4782 
4783 	error = sbuf_finish(buf);
4784 	if (error) {
4785 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4786 		sbuf_delete(buf);
4787 		return error;
4788 	}
4789 
4790 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4791 	if (error)
4792 		device_printf(dev, "sysctl error: %d\n", error);
4793 	sbuf_delete(buf);
4794 	return error;
4795 }
4796 
4797 /*
4798 ** Caller must init and delete sbuf; this function will clear and
4799 ** finish it for caller.
4800 */
4801 static char *
4802 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4803 {
4804 	sbuf_clear(s);
4805 
4806 	if (seid == 0 && uplink)
4807 		sbuf_cat(s, "Network");
4808 	else if (seid == 0)
4809 		sbuf_cat(s, "Host");
4810 	else if (seid == 1)
4811 		sbuf_cat(s, "EMP");
4812 	else if (seid <= 5)
4813 		sbuf_printf(s, "MAC %d", seid - 2);
4814 	else if (seid <= 15)
4815 		sbuf_cat(s, "Reserved");
4816 	else if (seid <= 31)
4817 		sbuf_printf(s, "PF %d", seid - 16);
4818 	else if (seid <= 159)
4819 		sbuf_printf(s, "VF %d", seid - 32);
4820 	else if (seid <= 287)
4821 		sbuf_cat(s, "Reserved");
4822 	else if (seid <= 511)
4823 		sbuf_cat(s, "Other"); // for other structures
4824 	else if (seid <= 895)
4825 		sbuf_printf(s, "VSI %d", seid - 512);
4826 	else if (seid <= 1023)
4827 		sbuf_printf(s, "Reserved");
4828 	else
4829 		sbuf_cat(s, "Invalid");
4830 
4831 	sbuf_finish(s);
4832 	return sbuf_data(s);
4833 }
4834 
4835 static int
4836 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4837 {
4838 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4839 	struct i40e_hw *hw = &pf->hw;
4840 	device_t dev = pf->dev;
4841 	struct sbuf *buf;
4842 	struct sbuf *nmbuf;
4843 	int error = 0;
4844 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4845 
4846 	u16 next = 0;
4847 	struct i40e_aqc_get_switch_config_resp *sw_config;
4848 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4849 
4850 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4851 	if (!buf) {
4852 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4853 		return (ENOMEM);
4854 	}
4855 
4856 	error = i40e_aq_get_switch_config(hw, sw_config,
4857 	    sizeof(aq_buf), &next, NULL);
4858 	if (error) {
4859 		device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4860 		    __func__, error, hw->aq.asq_last_status);
4861 		sbuf_delete(buf);
4862 		return error;
4863 	}
4864 
4865 	nmbuf = sbuf_new_auto();
4866 	if (!nmbuf) {
4867 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4868 		return (ENOMEM);
4869 	}
4870 
4871 	sbuf_cat(buf, "\n");
4872 	// Assuming <= 255 elements in switch
4873 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4874 	/* Exclude:
4875 	** Revision -- all elements are revision 1 for now
4876 	*/
4877 	sbuf_printf(buf,
4878 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4879 	    "                |          |          | (uplink)\n");
4880 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4881 		// "%4d (%8s) | %8s   %8s   %#8x",
4882 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4883 		sbuf_cat(buf, " ");
4884 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4885 		sbuf_cat(buf, " | ");
4886 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4887 		sbuf_cat(buf, "   ");
4888 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4889 		sbuf_cat(buf, "   ");
4890 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4891 		if (i < sw_config->header.num_reported - 1)
4892 			sbuf_cat(buf, "\n");
4893 	}
4894 	sbuf_delete(nmbuf);
4895 
4896 	error = sbuf_finish(buf);
4897 	if (error) {
4898 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4899 		sbuf_delete(buf);
4900 		return error;
4901 	}
4902 
4903 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4904 	if (error)
4905 		device_printf(dev, "sysctl error: %d\n", error);
4906 	sbuf_delete(buf);
4907 
4908 	return (error);
4909 }
4910 
4911 /*
4912 ** Dump TX desc given index.
4913 ** Doesn't work; don't use.
4914 ** TODO: Also needs a queue index input!
4915 **/
4916 static int
4917 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4918 {
4919 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4920 	device_t dev = pf->dev;
4921 	struct sbuf *buf;
4922 	int error = 0;
4923 
4924 	u16 desc_idx = 0;
4925 
4926 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4927 	if (!buf) {
4928 		device_printf(dev, "Could not allocate sbuf for output.\n");
4929 		return (ENOMEM);
4930 	}
4931 
4932 	/* Read in index */
4933 	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4934 	if (error)
4935 		return (error);
4936 	if (req->newptr == NULL)
4937 		return (EIO); // fix
4938 	if (desc_idx > 1024) { // fix
4939 		device_printf(dev,
4940 		    "Invalid descriptor index, needs to be < 1024\n"); // fix
4941 		return (EINVAL);
4942 	}
4943 
4944 	// Don't use this sysctl yet
4945 	if (TRUE)
4946 		return (ENODEV);
4947 
4948 	sbuf_cat(buf, "\n");
4949 
4950 	// set to queue 1?
4951 	struct ixl_queue *que = pf->vsi.queues;
4952 	struct tx_ring *txr = &(que[1].txr);
4953 	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4954 
4955 	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4956 	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4957 	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4958 
4959 	error = sbuf_finish(buf);
4960 	if (error) {
4961 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4962 		sbuf_delete(buf);
4963 		return error;
4964 	}
4965 
4966 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4967 	if (error)
4968 		device_printf(dev, "sysctl error: %d\n", error);
4969 	sbuf_delete(buf);
4970 	return error;
4971 }
4972 #endif /* IXL_DEBUG_SYSCTL */
4973 
4974