xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 2e5b60079b7d8c3ca68f1390cd90f305e651f8d3)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2014, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_rss.h"
38 #include "ixl.h"
39 #include "ixl_pf.h"
40 
41 #ifdef RSS
42 #include <net/rss_config.h>
43 #endif
44 
45 /*********************************************************************
46  *  Driver version
47  *********************************************************************/
48 char ixl_driver_version[] = "1.3.1";
49 
50 /*********************************************************************
51  *  PCI Device ID Table
52  *
53  *  Used by probe to select the devices the driver loads on.
54  *  The last field stores an index into ixl_strings.
55  *  The last entry must be all 0s.
56  *
57  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  *********************************************************************/
59 
60 static ixl_vendor_info_t ixl_vendor_info_array[] =
61 {
62 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
63 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
64 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
65 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
66 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
67 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
68 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
69 	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
70 	/* required last entry */
71 	{0, 0, 0, 0, 0}
72 };
73 
74 /*********************************************************************
75  *  Table of branding strings
76  *********************************************************************/
77 
78 static char    *ixl_strings[] = {
79 	"Intel(R) Ethernet Connection XL710 Driver"
80 };
81 
82 
83 /*********************************************************************
84  *  Function prototypes
85  *********************************************************************/
86 static int      ixl_probe(device_t);
87 static int      ixl_attach(device_t);
88 static int      ixl_detach(device_t);
89 static int      ixl_shutdown(device_t);
90 static int	ixl_get_hw_capabilities(struct ixl_pf *);
91 static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
92 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
93 static void	ixl_init(void *);
94 static void	ixl_init_locked(struct ixl_pf *);
95 static void     ixl_stop(struct ixl_pf *);
96 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
97 static int      ixl_media_change(struct ifnet *);
98 static void     ixl_update_link_status(struct ixl_pf *);
99 static int      ixl_allocate_pci_resources(struct ixl_pf *);
100 static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
101 static int	ixl_setup_stations(struct ixl_pf *);
102 static int	ixl_setup_vsi(struct ixl_vsi *);
103 static int	ixl_initialize_vsi(struct ixl_vsi *);
104 static int	ixl_assign_vsi_msix(struct ixl_pf *);
105 static int	ixl_assign_vsi_legacy(struct ixl_pf *);
106 static int	ixl_init_msix(struct ixl_pf *);
107 static void	ixl_configure_msix(struct ixl_pf *);
108 static void	ixl_configure_itr(struct ixl_pf *);
109 static void	ixl_configure_legacy(struct ixl_pf *);
110 static void	ixl_free_pci_resources(struct ixl_pf *);
111 static void	ixl_local_timer(void *);
112 static int	ixl_setup_interface(device_t, struct ixl_vsi *);
113 static bool	ixl_config_link(struct i40e_hw *);
114 static void	ixl_config_rss(struct ixl_vsi *);
115 static void	ixl_set_queue_rx_itr(struct ixl_queue *);
116 static void	ixl_set_queue_tx_itr(struct ixl_queue *);
117 static int	ixl_set_advertised_speeds(struct ixl_pf *, int);
118 
119 static void	ixl_enable_rings(struct ixl_vsi *);
120 static void	ixl_disable_rings(struct ixl_vsi *);
121 static void     ixl_enable_intr(struct ixl_vsi *);
122 static void     ixl_disable_intr(struct ixl_vsi *);
123 
124 static void     ixl_enable_adminq(struct i40e_hw *);
125 static void     ixl_disable_adminq(struct i40e_hw *);
126 static void     ixl_enable_queue(struct i40e_hw *, int);
127 static void     ixl_disable_queue(struct i40e_hw *, int);
128 static void     ixl_enable_legacy(struct i40e_hw *);
129 static void     ixl_disable_legacy(struct i40e_hw *);
130 
131 static void     ixl_set_promisc(struct ixl_vsi *);
132 static void     ixl_add_multi(struct ixl_vsi *);
133 static void     ixl_del_multi(struct ixl_vsi *);
134 static void	ixl_register_vlan(void *, struct ifnet *, u16);
135 static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
136 static void	ixl_setup_vlan_filters(struct ixl_vsi *);
137 
138 static void	ixl_init_filters(struct ixl_vsi *);
139 static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
140 static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
141 static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
142 static void	ixl_del_hw_filters(struct ixl_vsi *, int);
143 static struct ixl_mac_filter *
144 		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
145 static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
146 
147 /* Sysctl debug interface */
148 static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
149 static void	ixl_print_debug_info(struct ixl_pf *);
150 
151 /* The MSI/X Interrupt handlers */
152 static void	ixl_intr(void *);
153 static void	ixl_msix_que(void *);
154 static void	ixl_msix_adminq(void *);
155 static void	ixl_handle_mdd_event(struct ixl_pf *);
156 
157 /* Deferred interrupt tasks */
158 static void	ixl_do_adminq(void *, int);
159 
160 /* Sysctl handlers */
161 static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
162 static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
163 static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
164 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
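
/*
 * A minimal sketch of the handler shape behind the sysctl prototypes
 * above (illustrative only; "ixl_sysctl_example" is a hypothetical
 * name).  SYSCTL_HANDLER_ARGS provides the oid, the arg1/arg2 cookies,
 * and the request:
 *
 *	static int
 *	ixl_sysctl_example(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct ixl_pf	*pf = (struct ixl_pf *)arg1;
 *		int		error, val = 0;
 *
 *		error = sysctl_handle_int(oidp, &val, 0, req);
 *		if (error || req->newptr == NULL)
 *			return (error);
 *		... apply 'val' to the pf under IXL_PF_LOCK(pf) ...
 *		return (0);
 *	}
 */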
165 
166 /* Statistics */
167 static void     ixl_add_hw_stats(struct ixl_pf *);
168 static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
169 		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
170 static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
171 		    struct sysctl_oid_list *,
172 		    struct i40e_eth_stats *);
173 static void	ixl_update_stats_counters(struct ixl_pf *);
174 static void	ixl_update_eth_stats(struct ixl_vsi *);
175 static void	ixl_pf_reset_stats(struct ixl_pf *);
176 static void	ixl_vsi_reset_stats(struct ixl_vsi *);
177 static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
178 		    u64 *, u64 *);
179 static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
180 		    u64 *, u64 *);
181 
182 #ifdef IXL_DEBUG_SYSCTL
183 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
184 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
185 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
186 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
187 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
188 static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
189 #endif
190 
191 /*********************************************************************
192  *  FreeBSD Device Interface Entry Points
193  *********************************************************************/
194 
195 static device_method_t ixl_methods[] = {
196 	/* Device interface */
197 	DEVMETHOD(device_probe, ixl_probe),
198 	DEVMETHOD(device_attach, ixl_attach),
199 	DEVMETHOD(device_detach, ixl_detach),
200 	DEVMETHOD(device_shutdown, ixl_shutdown),
201 	{0, 0}
202 };
203 
204 static driver_t ixl_driver = {
205 	"ixl", ixl_methods, sizeof(struct ixl_pf),
206 };
207 
208 devclass_t ixl_devclass;
209 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
210 
211 MODULE_DEPEND(ixl, pci, 1, 1, 1);
212 MODULE_DEPEND(ixl, ether, 1, 1, 1);
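
/*
 * Illustrative usage, not part of the driver: when built as a module
 * the driver can be loaded at boot from /boot/loader.conf with
 *
 *	if_ixl_load="YES"
 *
 * or at runtime with "kldload if_ixl".
 */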
213 
214 /*
215 ** Global reset mutex
216 */
217 static struct mtx ixl_reset_mtx;
218 
219 /*
220 ** TUNEABLE PARAMETERS:
221 */
222 
223 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
224                    "IXL driver parameters");
225 
226 /*
227  * MSIX should be the default for best performance,
228  * but this allows it to be forced off for testing.
229  */
230 static int ixl_enable_msix = 1;
231 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
232 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
233     "Enable MSI-X interrupts");
234 
235 /*
236 ** Number of descriptors per ring:
237 **   - TX and RX are the same size
238 */
239 static int ixl_ringsz = DEFAULT_RING;
240 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
241 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
242     &ixl_ringsz, 0, "Descriptor Ring Size");
243 
244 /*
245 ** This can be set manually; if left as 0 the
246 ** number of queues will be calculated based
247 ** on the CPUs and MSI-X vectors available.
248 */
249 int ixl_max_queues = 0;
250 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
252     &ixl_max_queues, 0, "Number of Queues");
253 
254 /*
255 ** Controls for Interrupt Throttling
256 **	- true/false for dynamic adjustment
257 ** 	- default values for static ITR
258 */
259 int ixl_dynamic_rx_itr = 0;
260 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
261 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
262     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
263 
264 int ixl_dynamic_tx_itr = 0;
265 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
266 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
267     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
268 
269 int ixl_rx_itr = IXL_ITR_8K;
270 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
271 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
272     &ixl_rx_itr, 0, "RX Interrupt Rate");
273 
274 int ixl_tx_itr = IXL_ITR_4K;
275 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
276 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
277     &ixl_tx_itr, 0, "TX Interrupt Rate");
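
/*
 * Note on the ITR defaults above: the _8K/_4K names refer to the
 * approximate maximum interrupt rate (about 8000 or 4000 interrupts
 * per second); the value programmed into the hardware is an interval,
 * so a lower rate corresponds to a larger interval.
 */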
278 
279 #ifdef IXL_FDIR
280 static int ixl_enable_fdir = 1;
281 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
282 /* Rate at which we sample */
283 int ixl_atr_rate = 20;
284 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
285 #endif
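
/*
 * All of the tunables above are read from the kernel environment at
 * boot; illustrative /boot/loader.conf entries (example values only):
 *
 *	hw.ixl.enable_msix=1
 *	hw.ixl.ringsz=1024
 *	hw.ixl.max_queues=0
 *	hw.ixl.dynamic_rx_itr=1
 */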
286 
287 
288 static char *ixl_fc_string[6] = {
289 	"None",
290 	"Rx",
291 	"Tx",
292 	"Full",
293 	"Priority",
294 	"Default"
295 };
296 
297 
298 /*********************************************************************
299  *  Device identification routine
300  *
301  *  ixl_probe determines if the driver should be loaded on
302  *  the hardware, based on the PCI vendor/device ID of the device.
303  *
304  *  return BUS_PROBE_DEFAULT on success, positive on failure
305  *********************************************************************/
306 
307 static int
308 ixl_probe(device_t dev)
309 {
310 	ixl_vendor_info_t *ent;
311 
312 	u16	pci_vendor_id, pci_device_id;
313 	u16	pci_subvendor_id, pci_subdevice_id;
314 	char	device_name[256];
315 	static bool lock_init = FALSE;
316 
317 	INIT_DEBUGOUT("ixl_probe: begin");
318 
319 	pci_vendor_id = pci_get_vendor(dev);
320 	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
321 		return (ENXIO);
322 
323 	pci_device_id = pci_get_device(dev);
324 	pci_subvendor_id = pci_get_subvendor(dev);
325 	pci_subdevice_id = pci_get_subdevice(dev);
326 
327 	ent = ixl_vendor_info_array;
328 	while (ent->vendor_id != 0) {
329 		if ((pci_vendor_id == ent->vendor_id) &&
330 		    (pci_device_id == ent->device_id) &&
331 
332 		    ((pci_subvendor_id == ent->subvendor_id) ||
333 		     (ent->subvendor_id == 0)) &&
334 
335 		    ((pci_subdevice_id == ent->subdevice_id) ||
336 		     (ent->subdevice_id == 0))) {
337 			snprintf(device_name, sizeof(device_name), "%s, Version - %s",
338 				ixl_strings[ent->index],
339 				ixl_driver_version);
340 			device_set_desc_copy(dev, device_name);
341 			/* One shot mutex init */
342 			if (lock_init == FALSE) {
343 				lock_init = TRUE;
344 				mtx_init(&ixl_reset_mtx,
345 				    "ixl_reset",
346 				    "IXL RESET Lock", MTX_DEF);
347 			}
348 			return (BUS_PROBE_DEFAULT);
349 		}
350 		ent++;
351 	}
352 	return (ENXIO);
353 }
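
/*
 * Note on the match loop above: a zero subvendor/subdevice in a table
 * entry acts as a wildcard, so e.g. {I40E_INTEL_VENDOR_ID,
 * I40E_DEV_ID_SFP_XL710, 0, 0, 0} matches any subsystem ID.
 */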
354 
355 /*********************************************************************
356  *  Device initialization routine
357  *
358  *  The attach entry point is called when the driver is being loaded.
359  *  This routine identifies the type of hardware, allocates all resources
360  *  and initializes the hardware.
361  *
362  *  return 0 on success, positive on failure
363  *********************************************************************/
364 
365 static int
366 ixl_attach(device_t dev)
367 {
368 	struct ixl_pf	*pf;
369 	struct i40e_hw	*hw;
370 	struct ixl_vsi *vsi;
371 	u16		bus;
372 	int             error = 0;
373 
374 	INIT_DEBUGOUT("ixl_attach: begin");
375 
376 	/* Allocate, clear, and link in our primary soft structure */
377 	pf = device_get_softc(dev);
378 	pf->dev = pf->osdep.dev = dev;
379 	hw = &pf->hw;
380 
381 	/*
382 	** Note this assumes we have a single embedded VSI,
383 	** this could be enhanced later to allocate multiple
384 	*/
385 	vsi = &pf->vsi;
386 	vsi->dev = pf->dev;
387 
388 	/* Core Lock Init*/
389 	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
390 
391 	/* Set up the timer callout */
392 	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
393 
394 	/* Set up sysctls */
395 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
396 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
397 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
398 	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
399 
400 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
401 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
402 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
403 	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
404 
405 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
408 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
409 
410 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
411 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
412 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
413 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
414 
415 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
416 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
417 	    OID_AUTO, "rx_itr", CTLFLAG_RW,
418 	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
419 
420 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
421 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
422 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
423 	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
424 
425 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
426 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
427 	    OID_AUTO, "tx_itr", CTLFLAG_RW,
428 	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
429 
430 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
431 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
432 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
433 	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
434 
435 #ifdef IXL_DEBUG_SYSCTL
436 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
437 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
438 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
439 	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
440 
441 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
442 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
443 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
444 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
445 
446 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
449 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
450 
451 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
454 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
455 
456 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
459 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
460 
461 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
462 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463 	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
464 	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
465 #endif
466 
467 	/* Save off the PCI information */
468 	hw->vendor_id = pci_get_vendor(dev);
469 	hw->device_id = pci_get_device(dev);
470 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
471 	hw->subsystem_vendor_id =
472 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
473 	hw->subsystem_device_id =
474 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
475 
476 	hw->bus.device = pci_get_slot(dev);
477 	hw->bus.func = pci_get_function(dev);
478 
479 	/* Do PCI setup - map BAR0, etc */
480 	if (ixl_allocate_pci_resources(pf)) {
481 		device_printf(dev, "Allocation of PCI resources failed\n");
482 		error = ENXIO;
483 		goto err_out;
484 	}
485 
486 	/* Create for initial debugging use */
487 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
490 	    ixl_debug_info, "I", "Debug Information");
491 
492 
493 	/* Establish a clean starting point */
494 	i40e_clear_hw(hw);
495 	error = i40e_pf_reset(hw);
496 	if (error) {
497 		device_printf(dev,"PF reset failure %x\n", error);
498 		error = EIO;
499 		goto err_out;
500 	}
501 
502 	/* For now always do an initial CORE reset on first device */
503 	{
504 		static int	ixl_dev_count;
505 		static int	ixl_dev_track[32];
506 		u32		my_dev;
507 		int		i, found = FALSE;
508 		u16		bus = pci_get_bus(dev);
509 
510 		mtx_lock(&ixl_reset_mtx);
511 		my_dev = (bus << 8) | hw->bus.device;
512 
513 		for (i = 0; i < ixl_dev_count; i++) {
514 			if (ixl_dev_track[i] == my_dev)
515 				found = TRUE;
516 		}
517 
518 		if (!found) {
519 			u32 reg;
520
521 			ixl_dev_track[ixl_dev_count] = my_dev;
522 			ixl_dev_count++;
523
524 			INIT_DEBUGOUT("Initial CORE RESET\n");
525 			wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
526 			ixl_flush(hw);
527 			i = 50;
528 			do {
529 				i40e_msec_delay(50);
530 				reg = rd32(hw, I40E_GLGEN_RSTAT);
531 				if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
532 					break;
533 			} while (i--);
534
535 			/* paranoia */
536 			wr32(hw, I40E_PF_ATQLEN, 0);
537 			wr32(hw, I40E_PF_ATQBAL, 0);
538 			wr32(hw, I40E_PF_ATQBAH, 0);
539 			i40e_clear_pxe_mode(hw);
540 		}
541 		mtx_unlock(&ixl_reset_mtx);
542 	}
543 
544 	/* Set admin queue parameters */
545 	hw->aq.num_arq_entries = IXL_AQ_LEN;
546 	hw->aq.num_asq_entries = IXL_AQ_LEN;
547 	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
548 	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
549 
550 	/* Initialize the shared code */
551 	error = i40e_init_shared_code(hw);
552 	if (error) {
553 		device_printf(dev,"Unable to initialize the shared code\n");
554 		error = EIO;
555 		goto err_out;
556 	}
557 
558 	/* Set up the admin queue */
559 	error = i40e_init_adminq(hw);
560 	if (error) {
561 		device_printf(dev, "The driver for the device stopped "
562 		    "because the NVM image is newer than expected.\n"
563 		    "You must install the most recent version of "
564 		    "the network driver.\n");
565 		goto err_out;
566 	}
567 	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
568 
569 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
570 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
571 		device_printf(dev, "The driver for the device detected "
572 		    "a newer version of the NVM image than expected.\n"
573 		    "Please install the most recent version of the network driver.\n");
574 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
575 	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
576 		device_printf(dev, "The driver for the device detected "
577 		    "an older version of the NVM image than expected.\n"
578 		    "Please update the NVM image.\n");
579 
580 	/* Clear PXE mode */
581 	i40e_clear_pxe_mode(hw);
582 
583 	/* Get capabilities from the device */
584 	error = ixl_get_hw_capabilities(pf);
585 	if (error) {
586 		device_printf(dev, "HW capabilities failure!\n");
587 		goto err_get_cap;
588 	}
589 
590 	/* Set up host memory cache */
591 	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
592 	if (error) {
593 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
594 		goto err_get_cap;
595 	}
596 
597 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
598 	if (error) {
599 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
600 		goto err_mac_hmc;
601 	}
602 
603 	/* Disable LLDP from the firmware */
604 	i40e_aq_stop_lldp(hw, TRUE, NULL);
605 
606 	i40e_get_mac_addr(hw, hw->mac.addr);
607 	error = i40e_validate_mac_addr(hw->mac.addr);
608 	if (error) {
609 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
610 		goto err_mac_hmc;
611 	}
612 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
613 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
614 
615 	/* Set up VSI and queues */
616 	if (ixl_setup_stations(pf) != 0) {
617 		device_printf(dev, "setup stations failed!\n");
618 		error = ENOMEM;
619 		goto err_mac_hmc;
620 	}
621 
622 	/* Initialize mac filter list for VSI */
623 	SLIST_INIT(&vsi->ftl);
624 
625 	/* Set up interrupt routing here */
626 	if (pf->msix > 1)
627 		error = ixl_assign_vsi_msix(pf);
628 	else
629 		error = ixl_assign_vsi_legacy(pf);
630 	if (error)
631 		goto err_late;
632 
633 	i40e_msec_delay(75);
634 	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
635 	if (error) {
636 		device_printf(dev, "link restart failed, aq_err=%d\n",
637 		    pf->hw.aq.asq_last_status);
638 	}
639 
640 	/* Determine link state */
641 	vsi->link_up = ixl_config_link(hw);
642 
643 	/* Report if Unqualified modules are found */
644 	if ((vsi->link_up == FALSE) &&
645 	    (pf->hw.phy.link_info.link_info &
646 	    I40E_AQ_MEDIA_AVAILABLE) &&
647 	    (!(pf->hw.phy.link_info.an_info &
648 	    I40E_AQ_QUALIFIED_MODULE)))
649 		device_printf(dev, "Link failed because "
650 		    "an unqualified module was detected\n");
651 
652 	/* Setup OS specific network interface */
653 	if (ixl_setup_interface(dev, vsi) != 0) {
654 		device_printf(dev, "interface setup failed!\n");
655 		error = EIO;
656 		goto err_late;
657 	}
658 
659 	/* Get the bus configuration and set the shared code */
660 	bus = ixl_get_bus_info(hw, dev);
661 	i40e_set_pci_config_data(hw, bus);
662 
663 	/* Initialize statistics */
664 	ixl_pf_reset_stats(pf);
665 	ixl_update_stats_counters(pf);
666 	ixl_add_hw_stats(pf);
667 
668 	/* Reset port's advertised speeds */
669 	if (!i40e_is_40G_device(hw->device_id)) {
670 		pf->advertised_speed =
671 		    (hw->device_id == I40E_DEV_ID_10G_BASE_T) ? 0x7 : 0x6;
672 		ixl_set_advertised_speeds(pf, pf->advertised_speed);
673 	}
674 
675 	/* Register for VLAN events */
676 	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
677 	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
678 	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
679 	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
680 
681 
682 	INIT_DEBUGOUT("ixl_attach: end");
683 	return (0);
684 
685 err_late:
686 	if (vsi->ifp != NULL)
687 		if_free(vsi->ifp);
688 err_mac_hmc:
689 	i40e_shutdown_lan_hmc(hw);
690 err_get_cap:
691 	i40e_shutdown_adminq(hw);
692 err_out:
693 	ixl_free_pci_resources(pf);
694 	ixl_free_vsi(vsi);
695 	IXL_PF_LOCK_DESTROY(pf);
696 	return (error);
697 }
698 
699 /*********************************************************************
700  *  Device removal routine
701  *
702  *  The detach entry point is called when the driver is being removed.
703  *  This routine stops the adapter and deallocates all the resources
704  *  that were allocated for driver operation.
705  *
706  *  return 0 on success, positive on failure
707  *********************************************************************/
708 
709 static int
710 ixl_detach(device_t dev)
711 {
712 	struct ixl_pf		*pf = device_get_softc(dev);
713 	struct i40e_hw		*hw = &pf->hw;
714 	struct ixl_vsi		*vsi = &pf->vsi;
715 	struct ixl_queue	*que = vsi->queues;
716 	i40e_status		status;
717 
718 	INIT_DEBUGOUT("ixl_detach: begin");
719 
720 	/* Make sure VLANS are not using driver */
721 	if (vsi->ifp->if_vlantrunk != NULL) {
722 		device_printf(dev, "VLAN in use, detach first\n");
723 		return (EBUSY);
724 	}
725 
726 	IXL_PF_LOCK(pf);
727 	ixl_stop(pf);
728 	IXL_PF_UNLOCK(pf);
729 
730 	for (int i = 0; i < vsi->num_queues; i++, que++) {
731 		if (que->tq) {
732 			taskqueue_drain(que->tq, &que->task);
733 			taskqueue_drain(que->tq, &que->tx_task);
734 			taskqueue_free(que->tq);
735 		}
736 	}
737 
738 	/* Shutdown LAN HMC */
739 	status = i40e_shutdown_lan_hmc(hw);
740 	if (status)
741 		device_printf(dev,
742 		    "Shutdown LAN HMC failed with code %d\n", status);
743 
744 	/* Shutdown admin queue */
745 	status = i40e_shutdown_adminq(hw);
746 	if (status)
747 		device_printf(dev,
748 		    "Shutdown Admin queue failed with code %d\n", status);
749 
750 	/* Unregister VLAN events */
751 	if (vsi->vlan_attach != NULL)
752 		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
753 	if (vsi->vlan_detach != NULL)
754 		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
755 
756 	ether_ifdetach(vsi->ifp);
757 	callout_drain(&pf->timer);
758 
759 
760 	ixl_free_pci_resources(pf);
761 	bus_generic_detach(dev);
762 	if_free(vsi->ifp);
763 	ixl_free_vsi(vsi);
764 	IXL_PF_LOCK_DESTROY(pf);
765 	return (0);
766 }
767 
768 /*********************************************************************
769  *
770  *  Shutdown entry point
771  *
772  **********************************************************************/
773 
774 static int
775 ixl_shutdown(device_t dev)
776 {
777 	struct ixl_pf *pf = device_get_softc(dev);
778 	IXL_PF_LOCK(pf);
779 	ixl_stop(pf);
780 	IXL_PF_UNLOCK(pf);
781 	return (0);
782 }
783 
784 
785 /*********************************************************************
786  *
787  *  Get the hardware capabilities
788  *
789  **********************************************************************/
790 
791 static int
792 ixl_get_hw_capabilities(struct ixl_pf *pf)
793 {
794 	struct i40e_aqc_list_capabilities_element_resp *buf;
795 	struct i40e_hw	*hw = &pf->hw;
796 	device_t 	dev = pf->dev;
797 	int             error, len;
798 	u16		needed;
799 	bool		again = TRUE;
800 
801 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
802 retry:
803 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
804 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
805 		device_printf(dev, "Unable to allocate cap memory\n");
806 		return (ENOMEM);
807 	}
808 
809 	/* This populates the hw struct */
810 	error = i40e_aq_discover_capabilities(hw, buf, len,
811 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
812 	free(buf, M_DEVBUF);
813 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
814 	    (again == TRUE)) {
815 		/* retry once with a larger buffer */
816 		again = FALSE;
817 		len = needed;
818 		goto retry;
819 	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
820 		device_printf(dev, "capability discovery failed: %d\n",
821 		    pf->hw.aq.asq_last_status);
822 		return (ENODEV);
823 	}
824 
825 	/* Capture this PF's starting queue pair */
826 	pf->qbase = hw->func_caps.base_queue;
827 
828 #ifdef IXL_DEBUG
829 	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
830 	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
831 	    hw->pf_id, hw->func_caps.num_vfs,
832 	    hw->func_caps.num_msix_vectors,
833 	    hw->func_caps.num_msix_vectors_vf,
834 	    hw->func_caps.fd_filters_guaranteed,
835 	    hw->func_caps.fd_filters_best_effort,
836 	    hw->func_caps.num_tx_qp,
837 	    hw->func_caps.num_rx_qp,
838 	    hw->func_caps.base_queue);
839 #endif
840 	return (error);
841 }
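
/*
 * The single retry above relies on the admin queue contract: when the
 * capability buffer is too small the firmware fails the command with
 * I40E_AQ_RC_ENOMEM and reports the required size in 'needed', so one
 * retry with a buffer of that size is sufficient.
 */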
842 
843 static void
844 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
845 {
846 	device_t 	dev = vsi->dev;
847 
848 	/* Enable/disable TXCSUM/TSO4 */
849 	if (!(ifp->if_capenable & IFCAP_TXCSUM)
850 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
851 		if (mask & IFCAP_TXCSUM) {
852 			ifp->if_capenable |= IFCAP_TXCSUM;
853 			/* enable TXCSUM, restore TSO if previously enabled */
854 			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
855 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
856 				ifp->if_capenable |= IFCAP_TSO4;
857 			}
858 		}
859 		else if (mask & IFCAP_TSO4) {
860 			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
861 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
862 			device_printf(dev,
863 			    "TSO4 requires txcsum, enabling both...\n");
864 		}
865 	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
866 	    && !(ifp->if_capenable & IFCAP_TSO4)) {
867 		if (mask & IFCAP_TXCSUM)
868 			ifp->if_capenable &= ~IFCAP_TXCSUM;
869 		else if (mask & IFCAP_TSO4)
870 			ifp->if_capenable |= IFCAP_TSO4;
871 	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
872 	    && (ifp->if_capenable & IFCAP_TSO4)) {
873 		if (mask & IFCAP_TXCSUM) {
874 			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
875 			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
876 			device_printf(dev,
877 			    "TSO4 requires txcsum, disabling both...\n");
878 		} else if (mask & IFCAP_TSO4)
879 			ifp->if_capenable &= ~IFCAP_TSO4;
880 	}
881 
882 	/* Enable/disable TXCSUM_IPV6/TSO6 */
883 	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
884 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
885 		if (mask & IFCAP_TXCSUM_IPV6) {
886 			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
887 			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
888 				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
889 				ifp->if_capenable |= IFCAP_TSO6;
890 			}
891 		} else if (mask & IFCAP_TSO6) {
892 			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
893 			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
894 			device_printf(dev,
895 			    "TSO6 requires txcsum6, enabling both...\n");
896 		}
897 	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
898 	    && !(ifp->if_capenable & IFCAP_TSO6)) {
899 		if (mask & IFCAP_TXCSUM_IPV6)
900 			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
901 		else if (mask & IFCAP_TSO6)
902 			ifp->if_capenable |= IFCAP_TSO6;
903 	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
904 	    && (ifp->if_capenable & IFCAP_TSO6)) {
905 		if (mask & IFCAP_TXCSUM_IPV6) {
906 			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
907 			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
908 			device_printf(dev,
909 			    "TSO6 requires txcsum6, disabling both...\n");
910 		} else if (mask & IFCAP_TSO6)
911 			ifp->if_capenable &= ~IFCAP_TSO6;
912 	}
913 }
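
/*
 * Example of the interlock above (illustrative): with txcsum and tso4
 * both enabled, "ifconfig ixl0 -txcsum" clears both and records
 * IXL_FLAGS_KEEP_TSO4, so a later "ifconfig ixl0 txcsum" re-enables
 * tso4 as well.
 */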
914 
915 /*********************************************************************
916  *  Ioctl entry point
917  *
918  *  ixl_ioctl is called when the user wants to configure the
919  *  interface.
920  *
921  *  return 0 on success, positive on failure
922  **********************************************************************/
923 
924 static int
925 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
926 {
927 	struct ixl_vsi	*vsi = ifp->if_softc;
928 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
929 	struct ifreq	*ifr = (struct ifreq *) data;
930 #if defined(INET) || defined(INET6)
931 	struct ifaddr *ifa = (struct ifaddr *)data;
932 	bool		avoid_reset = FALSE;
933 #endif
934 	int             error = 0;
935 
936 	switch (command) {
937 
938 	case SIOCSIFADDR:
939 #ifdef INET
940 		if (ifa->ifa_addr->sa_family == AF_INET)
941 			avoid_reset = TRUE;
942 #endif
943 #ifdef INET6
944 		if (ifa->ifa_addr->sa_family == AF_INET6)
945 			avoid_reset = TRUE;
946 #endif
947 #if defined(INET) || defined(INET6)
948 		/*
949 		** Calling init results in link renegotiation,
950 		** so we avoid doing it when possible.
951 		*/
952 		if (avoid_reset) {
953 			ifp->if_flags |= IFF_UP;
954 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
955 				ixl_init(pf);
956 #ifdef INET
957 			if (!(ifp->if_flags & IFF_NOARP))
958 				arp_ifinit(ifp, ifa);
959 #endif
960 		} else
961 			error = ether_ioctl(ifp, command, data);
962 		break;
963 #endif
964 	case SIOCSIFMTU:
965 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
966 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
967 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
968 			error = EINVAL;
969 		} else {
970 			IXL_PF_LOCK(pf);
971 			ifp->if_mtu = ifr->ifr_mtu;
972 			vsi->max_frame_size =
973 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
974 			    + ETHER_VLAN_ENCAP_LEN;
975 			ixl_init_locked(pf);
976 			IXL_PF_UNLOCK(pf);
977 		}
978 		break;
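	/*
	 * Worked example for the MTU check above (assuming IXL_MAX_FRAME
	 * is 9728): the largest accepted MTU is 9728 - 14 (Ethernet
	 * header) - 4 (CRC) - 4 (VLAN tag) = 9706 bytes.
	 */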
979 	case SIOCSIFFLAGS:
980 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
981 		IXL_PF_LOCK(pf);
982 		if (ifp->if_flags & IFF_UP) {
983 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
984 				if ((ifp->if_flags ^ pf->if_flags) &
985 				    (IFF_PROMISC | IFF_ALLMULTI)) {
986 					ixl_set_promisc(vsi);
987 				}
988 			} else
989 				ixl_init_locked(pf);
990 		} else
991 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
992 				ixl_stop(pf);
993 		pf->if_flags = ifp->if_flags;
994 		IXL_PF_UNLOCK(pf);
995 		break;
996 	case SIOCADDMULTI:
997 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
998 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
999 			IXL_PF_LOCK(pf);
1000 			ixl_disable_intr(vsi);
1001 			ixl_add_multi(vsi);
1002 			ixl_enable_intr(vsi);
1003 			IXL_PF_UNLOCK(pf);
1004 		}
1005 		break;
1006 	case SIOCDELMULTI:
1007 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1008 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1009 			IXL_PF_LOCK(pf);
1010 			ixl_disable_intr(vsi);
1011 			ixl_del_multi(vsi);
1012 			ixl_enable_intr(vsi);
1013 			IXL_PF_UNLOCK(pf);
1014 		}
1015 		break;
1016 	case SIOCSIFMEDIA:
1017 	case SIOCGIFMEDIA:
1018 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1019 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1020 		break;
1021 	case SIOCSIFCAP:
1022 	{
1023 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1024 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1025 
1026 		ixl_cap_txcsum_tso(vsi, ifp, mask);
1027 
1028 		if (mask & IFCAP_RXCSUM)
1029 			ifp->if_capenable ^= IFCAP_RXCSUM;
1030 		if (mask & IFCAP_RXCSUM_IPV6)
1031 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1032 		if (mask & IFCAP_LRO)
1033 			ifp->if_capenable ^= IFCAP_LRO;
1034 		if (mask & IFCAP_VLAN_HWTAGGING)
1035 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1036 		if (mask & IFCAP_VLAN_HWFILTER)
1037 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1038 		if (mask & IFCAP_VLAN_HWTSO)
1039 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1040 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1041 			IXL_PF_LOCK(pf);
1042 			ixl_init_locked(pf);
1043 			IXL_PF_UNLOCK(pf);
1044 		}
1045 		VLAN_CAPABILITIES(ifp);
1046 
1047 		break;
1048 	}
1049 
1050 	default:
1051 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1052 		error = ether_ioctl(ifp, command, data);
1053 		break;
1054 	}
1055 
1056 	return (error);
1057 }
1058 
1059 
1060 /*********************************************************************
1061  *  Init entry point
1062  *
1063  *  This routine is used in two ways. It is used by the stack as
1064  *  the init entry point in the network interface structure. It is
1065  *  also used by the driver as a hw/sw initialization routine to get
1066  *  to a consistent state.
1067  *
1068  *  Called by ixl_init() with the PF lock held; returns nothing.
1069  **********************************************************************/
1070 
1071 static void
1072 ixl_init_locked(struct ixl_pf *pf)
1073 {
1074 	struct i40e_hw	*hw = &pf->hw;
1075 	struct ixl_vsi	*vsi = &pf->vsi;
1076 	struct ifnet	*ifp = vsi->ifp;
1077 	device_t 	dev = pf->dev;
1078 	struct i40e_filter_control_settings	filter;
1079 	u8		tmpaddr[ETHER_ADDR_LEN];
1080 	int		ret;
1081 
1082 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1083 	INIT_DEBUGOUT("ixl_init: begin");
1084 	ixl_stop(pf);
1085 
1086 	/* Get the latest mac address... User might use a LAA */
1087 	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1088 	      I40E_ETH_LENGTH_OF_ADDRESS);
1089 	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1090 	    i40e_validate_mac_addr(tmpaddr)) {
1091 		bcopy(tmpaddr, hw->mac.addr,
1092 		    I40E_ETH_LENGTH_OF_ADDRESS);
1093 		ret = i40e_aq_mac_address_write(hw,
1094 		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1095 		    hw->mac.addr, NULL);
1096 		if (ret) {
1097 			device_printf(dev, "LLA address "
1098 			    "change failed!\n");
1099 			return;
1100 		}
1101 	}
1102 
1103 	/* Set the various hardware offload abilities */
1104 	ifp->if_hwassist = 0;
1105 	if (ifp->if_capenable & IFCAP_TSO)
1106 		ifp->if_hwassist |= CSUM_TSO;
1107 	if (ifp->if_capenable & IFCAP_TXCSUM)
1108 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1109 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1110 		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1111 
1112 	/* Set up the device filtering */
1113 	bzero(&filter, sizeof(filter));
1114 	filter.enable_ethtype = TRUE;
1115 	filter.enable_macvlan = TRUE;
1116 #ifdef IXL_FDIR
1117 	filter.enable_fdir = TRUE;
1118 #endif
1119 	if (i40e_set_filter_control(hw, &filter))
1120 		device_printf(dev, "set_filter_control() failed\n");
1121 
1122 	/* Set up RSS */
1123 	ixl_config_rss(vsi);
1124 
1125 	/* Setup the VSI */
1126 	ixl_setup_vsi(vsi);
1127 
1128 	/*
1129 	** Prepare the rings, hmc contexts, etc...
1130 	*/
1131 	if (ixl_initialize_vsi(vsi)) {
1132 		device_printf(dev, "initialize vsi failed!\n");
1133 		return;
1134 	}
1135 
1136 	/* Add protocol filters to list */
1137 	ixl_init_filters(vsi);
1138 
1139 	/* Setup vlan's if needed */
1140 	ixl_setup_vlan_filters(vsi);
1141 
1142 	/* Start the local timer */
1143 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1144 
1145 	/* Set up MSI/X routing and the ITR settings */
1146 	if (ixl_enable_msix) {
1147 		ixl_configure_msix(pf);
1148 		ixl_configure_itr(pf);
1149 	} else
1150 		ixl_configure_legacy(pf);
1151 
1152 	ixl_enable_rings(vsi);
1153 
1154 	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1155 
1156 	/* Set MTU in hardware */
1157 	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1158 	    TRUE, 0, NULL);
1159 	if (aq_error)
1160 		device_printf(vsi->dev,
1161 			"aq_set_mac_config in init error, code %d\n",
1162 		    aq_error);
1163 
1164 	/* And now turn on interrupts */
1165 	ixl_enable_intr(vsi);
1166 
1167 	/* Now inform the stack we're ready */
1168 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1169 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1170 
1171 	return;
1172 }
1173 
1174 static void
1175 ixl_init(void *arg)
1176 {
1177 	struct ixl_pf *pf = arg;
1178 
1179 	IXL_PF_LOCK(pf);
1180 	ixl_init_locked(pf);
1181 	IXL_PF_UNLOCK(pf);
1182 	return;
1183 }
1184 
1185 /*
1186 **
1187 ** MSIX Interrupt Handlers and Deferred Tasks
1188 **
1189 */
1190 static void
1191 ixl_handle_que(void *context, int pending)
1192 {
1193 	struct ixl_queue *que = context;
1194 	struct ixl_vsi *vsi = que->vsi;
1195 	struct i40e_hw  *hw = vsi->hw;
1196 	struct tx_ring  *txr = &que->txr;
1197 	struct ifnet    *ifp = vsi->ifp;
1198 	bool		more;
1199 
1200 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1201 		more = ixl_rxeof(que, IXL_RX_LIMIT);
1202 		IXL_TX_LOCK(txr);
1203 		ixl_txeof(que);
1204 		if (!drbr_empty(ifp, txr->br))
1205 			ixl_mq_start_locked(ifp, txr);
1206 		IXL_TX_UNLOCK(txr);
1207 		if (more) {
1208 			taskqueue_enqueue(que->tq, &que->task);
1209 			return;
1210 		}
1211 	}
1212 
1213 	/* Re-enable this queue's interrupt */
1214 	ixl_enable_queue(hw, que->me);
1215 	return;
1216 }
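
/*
 * Note on the pattern above: when ixl_rxeof() reports more work the
 * task re-enqueues itself instead of unmasking the interrupt, so the
 * queue keeps being serviced without taking another hardware
 * interrupt; the interrupt is only re-enabled once the queue drains.
 */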
1217 
1218 
1219 /*********************************************************************
1220  *
1221  *  Legacy Interrupt Service routine
1222  *
1223  **********************************************************************/
1224 void
1225 ixl_intr(void *arg)
1226 {
1227 	struct ixl_pf		*pf = arg;
1228 	struct i40e_hw		*hw =  &pf->hw;
1229 	struct ixl_vsi		*vsi = &pf->vsi;
1230 	struct ixl_queue	*que = vsi->queues;
1231 	struct ifnet		*ifp = vsi->ifp;
1232 	struct tx_ring		*txr = &que->txr;
1233 	u32			reg, icr0, mask;
1234 	bool			more_tx, more_rx;
1235 
1236 	++que->irqs;
1237 
1238 	/* Protect against spurious interrupts */
1239 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1240 		return;
1241 
1242 	icr0 = rd32(hw, I40E_PFINT_ICR0);
1243 
1244 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1245 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1246 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1247 
1248 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1249 
1250 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1251 		taskqueue_enqueue(pf->tq, &pf->adminq);
1252 		return;
1253 	}
1254 
1255 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1256 
1257 	IXL_TX_LOCK(txr);
1258 	more_tx = ixl_txeof(que);
1259 	if (!drbr_empty(vsi->ifp, txr->br))
1260 		more_tx = 1;
1261 	IXL_TX_UNLOCK(txr);
1262 
1263 	/* re-enable other interrupt causes */
1264 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1265 
1266 	/* And now the queues */
1267 	reg = rd32(hw, I40E_QINT_RQCTL(0));
1268 	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1269 	wr32(hw, I40E_QINT_RQCTL(0), reg);
1270 
1271 	reg = rd32(hw, I40E_QINT_TQCTL(0));
1272 	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1273 	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1274 	wr32(hw, I40E_QINT_TQCTL(0), reg);
1275 
1276 	ixl_enable_legacy(hw);
1277 
1278 	return;
1279 }
1280 
1281 
1282 /*********************************************************************
1283  *
1284  *  MSIX VSI Interrupt Service routine
1285  *
1286  **********************************************************************/
1287 void
1288 ixl_msix_que(void *arg)
1289 {
1290 	struct ixl_queue	*que = arg;
1291 	struct ixl_vsi	*vsi = que->vsi;
1292 	struct i40e_hw	*hw = vsi->hw;
1293 	struct tx_ring	*txr = &que->txr;
1294 	bool		more_tx, more_rx;
1295 
1296 	/* Protect against spurious interrupts */
1297 	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1298 		return;
1299 
1300 	++que->irqs;
1301 
1302 	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1303 
1304 	IXL_TX_LOCK(txr);
1305 	more_tx = ixl_txeof(que);
1306 	/*
1307 	** Make certain that if the stack
1308 	** has anything queued the task gets
1309 	** scheduled to handle it.
1310 	*/
1311 	if (!drbr_empty(vsi->ifp, txr->br))
1312 		more_tx = 1;
1313 	IXL_TX_UNLOCK(txr);
1314 
1315 	ixl_set_queue_rx_itr(que);
1316 	ixl_set_queue_tx_itr(que);
1317 
1318 	if (more_tx || more_rx)
1319 		taskqueue_enqueue(que->tq, &que->task);
1320 	else
1321 		ixl_enable_queue(hw, que->me);
1322 
1323 	return;
1324 }
1325 
1326 
1327 /*********************************************************************
1328  *
1329  *  MSIX Admin Queue Interrupt Service routine
1330  *
1331  **********************************************************************/
1332 static void
1333 ixl_msix_adminq(void *arg)
1334 {
1335 	struct ixl_pf	*pf = arg;
1336 	struct i40e_hw	*hw = &pf->hw;
1337 	u32		reg, mask;
1338 
1339 	++pf->admin_irq;
1340 
1341 	reg = rd32(hw, I40E_PFINT_ICR0);
1342 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1343 
1344 	/* Check on the cause */
1345 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1346 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1347 
1348 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1349 		ixl_handle_mdd_event(pf);
1350 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1351 	}
1352 
1353 	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1354 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1355 
1356 	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1357 	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1358 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1359 
1360 	taskqueue_enqueue(pf->tq, &pf->adminq);
1361 	return;
1362 }
1363 
1364 /*********************************************************************
1365  *
1366  *  Media Ioctl callback
1367  *
1368  *  This routine is called whenever the user queries the status of
1369  *  the interface using ifconfig.
1370  *
1371  **********************************************************************/
1372 static void
1373 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1374 {
1375 	struct ixl_vsi	*vsi = ifp->if_softc;
1376 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1377 	struct i40e_hw  *hw = &pf->hw;
1378 
1379 	INIT_DEBUGOUT("ixl_media_status: begin");
1380 	IXL_PF_LOCK(pf);
1381 
1382 	ixl_update_link_status(pf);
1383 
1384 	ifmr->ifm_status = IFM_AVALID;
1385 	ifmr->ifm_active = IFM_ETHER;
1386 
1387 	if (!vsi->link_up) {
1388 		IXL_PF_UNLOCK(pf);
1389 		return;
1390 	}
1391 
1392 	ifmr->ifm_status |= IFM_ACTIVE;
1393 	/* Hardware is always full-duplex */
1394 	ifmr->ifm_active |= IFM_FDX;
1395 
1396 	switch (hw->phy.link_info.phy_type) {
1397 		/* 100 M */
1398 		case I40E_PHY_TYPE_100BASE_TX:
1399 			ifmr->ifm_active |= IFM_100_TX;
1400 			break;
1401 		/* 1 G */
1402 		case I40E_PHY_TYPE_1000BASE_T:
1403 			ifmr->ifm_active |= IFM_1000_T;
1404 			break;
1405 		case I40E_PHY_TYPE_1000BASE_SX:
1406 			ifmr->ifm_active |= IFM_1000_SX;
1407 			break;
1408 		case I40E_PHY_TYPE_1000BASE_LX:
1409 			ifmr->ifm_active |= IFM_1000_LX;
1410 			break;
1411 		/* 10 G */
1412 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1413 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1414 			ifmr->ifm_active |= IFM_10G_TWINAX;
1415 			break;
1416 		case I40E_PHY_TYPE_10GBASE_KR:
1417 			/*
1418 			** this is not technically correct
1419 			** but FreeBSD does not have the media
1420 			** type defined yet, so it's a compromise.
1421 			*/
1422 		case I40E_PHY_TYPE_10GBASE_SR:
1423 			ifmr->ifm_active |= IFM_10G_SR;
1424 			break;
1425 		case I40E_PHY_TYPE_10GBASE_LR:
1426 			ifmr->ifm_active |= IFM_10G_LR;
1427 			break;
1428 		case I40E_PHY_TYPE_10GBASE_T:
1429 			ifmr->ifm_active |= IFM_10G_T;
1430 			break;
1431 		/* 40 G */
1432 		case I40E_PHY_TYPE_40GBASE_CR4:
1433 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1434 			ifmr->ifm_active |= IFM_40G_CR4;
1435 			break;
1436 		case I40E_PHY_TYPE_40GBASE_SR4:
1437 			ifmr->ifm_active |= IFM_40G_SR4;
1438 			break;
1439 		case I40E_PHY_TYPE_40GBASE_LR4:
1440 			ifmr->ifm_active |= IFM_40G_LR4;
1441 			break;
1442 		default:
1443 			ifmr->ifm_active |= IFM_UNKNOWN;
1444 			break;
1445 	}
1446 	/* Report flow control status as well */
1447 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1448 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1449 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1450 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1451 
1452 	IXL_PF_UNLOCK(pf);
1453 
1454 	return;
1455 }
1456 
1457 /*********************************************************************
1458  *
1459  *  Media Ioctl callback
1460  *
1461  *  This routine is called when the user changes speed/duplex using
1462  *  media/mediopt option with ifconfig.
1463  *
1464  **********************************************************************/
1465 static int
1466 ixl_media_change(struct ifnet * ifp)
1467 {
1468 	struct ixl_vsi *vsi = ifp->if_softc;
1469 	struct ifmedia *ifm = &vsi->media;
1470 
1471 	INIT_DEBUGOUT("ixl_media_change: begin");
1472 
1473 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1474 		return (EINVAL);
1475 
1476 	if_printf(ifp, "Media change is currently not supported.\n");
1477 
1478 	return (ENODEV);
1479 }
1480 
1481 
1482 #ifdef IXL_FDIR
1483 /*
1484 ** ATR: Application Targeted Receive - creates a filter
1485 **	based on TX flow info that will keep the receive
1486 **	portion of the flow on the same queue. Based on the
1487 **	implementation this is only available for TCP connections
1488 */
1489 void
1490 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1491 {
1492 	struct ixl_vsi			*vsi = que->vsi;
1493 	struct tx_ring			*txr = &que->txr;
1494 	struct i40e_filter_program_desc	*FDIR;
1495 	u32				ptype, dtype;
1496 	int				idx;
1497 
1498 	/* check if ATR is enabled and sample rate */
1499 	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1500 		return;
1501 	/*
1502 	** We always sample TCP SYN/FIN packets, and
1503 	** otherwise sample at the selected rate
1504 	*/
1505 	txr->atr_count++;
1506 	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1507 	    (txr->atr_count < txr->atr_rate))
1508 		return;
1509 	txr->atr_count = 0;
1510 
1511 	/* Get a descriptor to use */
1512 	idx = txr->next_avail;
1513 	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1514 	if (++idx == que->num_desc)
1515 		idx = 0;
1516 	txr->avail--;
1517 	txr->next_avail = idx;
1518 
1519 	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1520 	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1521 
1522 	ptype |= (etype == ETHERTYPE_IP) ?
1523 	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1524 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1525 	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1526 	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1527 
1528 	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1529 
1530 	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1531 
1532 	/*
1533 	** We use the TCP TH_FIN as a trigger to remove
1534 	** the filter, otherwise it's an update.
1535 	*/
1536 	dtype |= (th->th_flags & TH_FIN) ?
1537 	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1538 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1539 	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1540 	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1541 
1542 	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1543 	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1544 
1545 	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1546 	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1547 
1548 	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1549 	FDIR->dtype_cmd_cntindex = htole32(dtype);
1550 	return;
1551 }
1552 #endif
1553 
1554 
1555 static void
1556 ixl_set_promisc(struct ixl_vsi *vsi)
1557 {
1558 	struct ifnet	*ifp = vsi->ifp;
1559 	struct i40e_hw	*hw = vsi->hw;
1560 	int		err, mcnt = 0;
1561 	bool		uni = FALSE, multi = FALSE;
1562 
1563 	if (ifp->if_flags & IFF_ALLMULTI)
1564 		multi = TRUE;
1565 	else { /* Need to count the multicast addresses */
1566 		struct  ifmultiaddr *ifma;
1567 		if_maddr_rlock(ifp);
1568 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1569 			if (ifma->ifma_addr->sa_family != AF_LINK)
1570 				continue;
1571 			if (mcnt == MAX_MULTICAST_ADDR)
1572 				break;
1573 			mcnt++;
1574 		}
1575 		if_maddr_runlock(ifp);
1576 	}
1577 
1578 	if (mcnt >= MAX_MULTICAST_ADDR)
1579 		multi = TRUE;
1580 	if (ifp->if_flags & IFF_PROMISC)
1581 		uni = TRUE;
1582 
1583 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1584 	    vsi->seid, uni, NULL);
1585 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1586 	    vsi->seid, multi, NULL);
1587 	return;
1588 }
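
/*
 * Illustrative mapping of the logic above: "ifconfig ixl0 promisc"
 * (IFF_PROMISC) enables unicast promiscuous mode, while IFF_ALLMULTI
 * or membership in MAX_MULTICAST_ADDR or more groups enables
 * multicast promiscuous mode.
 */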
1589 
1590 /*********************************************************************
1591  * 	Filter Routines
1592  *
1593  *	Routines for multicast and vlan filter management.
1594  *
1595  *********************************************************************/
1596 static void
1597 ixl_add_multi(struct ixl_vsi *vsi)
1598 {
1599 	struct	ifmultiaddr	*ifma;
1600 	struct ifnet		*ifp = vsi->ifp;
1601 	struct i40e_hw		*hw = vsi->hw;
1602 	int			mcnt = 0, flags;
1603 
1604 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1605 
1606 	if_maddr_rlock(ifp);
1607 	/*
1608 	** First just get a count, to decide if we
1609 	** should simply use multicast promiscuous.
1610 	*/
1611 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1612 		if (ifma->ifma_addr->sa_family != AF_LINK)
1613 			continue;
1614 		mcnt++;
1615 	}
1616 	if_maddr_runlock(ifp);
1617 
1618 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1619 		/* delete existing MC filters */
1620 		ixl_del_hw_filters(vsi, mcnt);
1621 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1622 		    vsi->seid, TRUE, NULL);
1623 		return;
1624 	}
1625 
1626 	mcnt = 0;
1627 	if_maddr_rlock(ifp);
1628 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1629 		if (ifma->ifma_addr->sa_family != AF_LINK)
1630 			continue;
1631 		ixl_add_mc_filter(vsi,
1632 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1633 		mcnt++;
1634 	}
1635 	if_maddr_runlock(ifp);
1636 	if (mcnt > 0) {
1637 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1638 		ixl_add_hw_filters(vsi, flags, mcnt);
1639 	}
1640 
1641 	IOCTL_DEBUGOUT("ixl_add_multi: end");
1642 	return;
1643 }
1644 
1645 static void
1646 ixl_del_multi(struct ixl_vsi *vsi)
1647 {
1648 	struct ifnet		*ifp = vsi->ifp;
1649 	struct ifmultiaddr	*ifma;
1650 	struct ixl_mac_filter	*f;
1651 	int			mcnt = 0;
1652 	bool		match = FALSE;
1653 
1654 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1655 
1656 	/* Search for removed multicast addresses */
1657 	if_maddr_rlock(ifp);
1658 	SLIST_FOREACH(f, &vsi->ftl, next) {
1659 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1660 			match = FALSE;
1661 			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1662 				if (ifma->ifma_addr->sa_family != AF_LINK)
1663 					continue;
1664 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1665 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1666 					match = TRUE;
1667 					break;
1668 				}
1669 			}
1670 			if (match == FALSE) {
1671 				f->flags |= IXL_FILTER_DEL;
1672 				mcnt++;
1673 			}
1674 		}
1675 	}
1676 	if_maddr_runlock(ifp);
1677 
1678 	if (mcnt > 0)
1679 		ixl_del_hw_filters(vsi, mcnt);
1680 }
1681 
1682 
1683 /*********************************************************************
1684  *  Timer routine
1685  *
1686  *  This routine checks for link status, updates statistics,
1687  *  and runs the watchdog check.
1688  *
1689  **********************************************************************/
1690 
1691 static void
1692 ixl_local_timer(void *arg)
1693 {
1694 	struct ixl_pf		*pf = arg;
1695 	struct i40e_hw		*hw = &pf->hw;
1696 	struct ixl_vsi		*vsi = &pf->vsi;
1697 	struct ixl_queue	*que = vsi->queues;
1698 	device_t		dev = pf->dev;
1699 	int			hung = 0;
1700 	u32			mask;
1701 
1702 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1703 
1704 	/* Fire off the adminq task */
1705 	taskqueue_enqueue(pf->tq, &pf->adminq);
1706 
1707 	/* Update stats */
1708 	ixl_update_stats_counters(pf);
1709 
1710 	/*
1711 	** Check status of the queues
1712 	*/
1713 	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1714 		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1715 
1716 	for (int i = 0; i < vsi->num_queues; i++,que++) {
1717 		/* Any queues with outstanding work get a sw irq */
1718 		if (que->busy)
1719 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1720 		/*
1721 		** Each time txeof runs without cleaning while there
1722 		** are outstanding descriptors, it increments busy; once
1723 		** busy reaches IXL_MAX_TX_BUSY the queue is declared hung.
1724 		*/
1725 		if (que->busy == IXL_QUEUE_HUNG) {
1726 			++hung;
1727 			/* Mark the queue as inactive */
1728 			vsi->active_queues &= ~((u64)1 << que->me);
1729 			continue;
1730 		} else {
1731 			/* Check if we've come back from hung */
1732 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1733 				vsi->active_queues |= ((u64)1 << que->me);
1734 		}
1735 		if (que->busy >= IXL_MAX_TX_BUSY) {
1736 #ifdef IXL_DEBUG
1737 			device_printf(dev, "Warning: queue %d "
1738 			    "appears to be hung!\n", i);
1739 #endif
1740 			que->busy = IXL_QUEUE_HUNG;
1741 			++hung;
1742 		}
1743 	}
1744 	/* Only reinit if all queues show hung */
1745 	if (hung == vsi->num_queues)
1746 		goto hung;
1747 
1748 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1749 	return;
1750 
1751 hung:
1752 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1753 	ixl_init_locked(pf);
1754 }
1755 
1756 /*
1757 ** Note: this routine updates the OS on the link state
1758 **	the real check of the hardware only happens with
1759 **	a link interrupt.
1760 */
1761 static void
1762 ixl_update_link_status(struct ixl_pf *pf)
1763 {
1764 	struct ixl_vsi		*vsi = &pf->vsi;
1765 	struct i40e_hw		*hw = &pf->hw;
1766 	struct ifnet		*ifp = vsi->ifp;
1767 	device_t		dev = pf->dev;
1768 	enum i40e_fc_mode 	fc;
1769 
1770 
1771 	if (vsi->link_up) {
1772 		if (vsi->link_active == FALSE) {
1773 			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1774 			if (bootverbose) {
1775 				fc = hw->fc.current_mode;
1776 				device_printf(dev,"Link is up %d Gbps %s,"
1777 				    " Flow Control: %s\n",
1778 				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1779 				    "Full Duplex", ixl_fc_string[fc]);
1780 			}
1781 			vsi->link_active = TRUE;
1782 			/*
1783 			** Warn user if link speed on NPAR enabled
1784 			** partition is not at least 10GB
1785 			*/
1786 			if (hw->func_caps.npar_enable &&
1787 			   (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1788 			   hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1789 				device_printf(dev, "The partition detected link "
1790 				    "speed that is less than 10Gbps\n");
1791 			if_link_state_change(ifp, LINK_STATE_UP);
1792 		}
1793 	} else { /* Link down */
1794 		if (vsi->link_active == TRUE) {
1795 			if (bootverbose)
1796 				device_printf(dev,"Link is Down\n");
1797 			if_link_state_change(ifp, LINK_STATE_DOWN);
1798 			vsi->link_active = FALSE;
1799 		}
1800 	}
1801 
1802 	return;
1803 }
1804 
1805 /*********************************************************************
1806  *
1807  *  This routine stops all traffic on the adapter: it disables
1808  *  interrupts and the rings, and halts the local timer.
1809  *
1810  **********************************************************************/
1811 
1812 static void
1813 ixl_stop(struct ixl_pf *pf)
1814 {
1815 	struct ixl_vsi	*vsi = &pf->vsi;
1816 	struct ifnet	*ifp = vsi->ifp;
1817 
1818 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1819 
1820 	INIT_DEBUGOUT("ixl_stop: begin\n");
1821 	ixl_disable_intr(vsi);
1822 	ixl_disable_rings(vsi);
1823 
1824 	/* Tell the stack that the interface is no longer active */
1825 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1826 
1827 	/* Stop the local timer */
1828 	callout_stop(&pf->timer);
1829 
1830 	return;
1831 }
1832 
1833 
1834 /*********************************************************************
1835  *
1836  *  Setup Legacy or MSI (single vector) interrupt resources and handler
1837  *
1838  **********************************************************************/
1839 static int
1840 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1841 {
1842 	device_t        dev = pf->dev;
1843 	struct 		ixl_vsi *vsi = &pf->vsi;
1844 	struct		ixl_queue *que = vsi->queues;
1845 	int 		error, rid = 0;
1846 
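	/* With legacy INTx the IRQ resource is rid 0; a single MSI message uses rid 1 */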
1847 	if (pf->msix == 1)
1848 		rid = 1;
1849 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1850 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1851 	if (pf->res == NULL) {
1852 		device_printf(dev,"Unable to allocate"
1853 		    " bus resource: vsi legacy/msi interrupt\n");
1854 		return (ENXIO);
1855 	}
1856 
1857 	/* Set the handler function */
1858 	error = bus_setup_intr(dev, pf->res,
1859 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1860 	    ixl_intr, pf, &pf->tag);
1861 	if (error) {
1862 		pf->res = NULL;
1863 		device_printf(dev, "Failed to register legacy/msi handler");
1864 		return (error);
1865 	}
1866 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1867 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1868 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1869 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1870 	    taskqueue_thread_enqueue, &que->tq);
1871 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1872 	    device_get_nameunit(dev));
1873 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1874 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1875 	    taskqueue_thread_enqueue, &pf->tq);
1876 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1877 	    device_get_nameunit(dev));
1878 
1879 	return (0);
1880 }
1881 
1882 
1883 /*********************************************************************
1884  *
1885  *  Setup MSIX Interrupt resources and handlers for the VSI
1886  *
1887  **********************************************************************/
1888 static int
1889 ixl_assign_vsi_msix(struct ixl_pf *pf)
1890 {
1891 	device_t	dev = pf->dev;
1892 	struct 		ixl_vsi *vsi = &pf->vsi;
1893 	struct 		ixl_queue *que = vsi->queues;
1894 	struct		tx_ring	 *txr;
1895 	int 		error, rid, vector = 0;
1896 
1897 	/* Admin Queue is vector 0 */
1898 	rid = vector + 1;
1899 	pf->res = bus_alloc_resource_any(dev,
1900 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1901 	if (!pf->res) {
1902 		device_printf(dev,"Unable to allocate"
1903 	    " bus resource: Adminq interrupt [%d]\n", rid);
1904 		return (ENXIO);
1905 	}
1906 	/* Set the adminq vector and handler */
1907 	error = bus_setup_intr(dev, pf->res,
1908 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1909 	    ixl_msix_adminq, pf, &pf->tag);
1910 	if (error) {
1911 		pf->res = NULL;
1912 		device_printf(dev, "Failed to register Admin que handler");
1913 		return (error);
1914 	}
1915 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1916 	pf->admvec = vector;
1917 	/* Tasklet for Admin Queue */
1918 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1919 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1920 	    taskqueue_thread_enqueue, &pf->tq);
1921 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1922 	    device_get_nameunit(pf->dev));
1923 	++vector;
1924 
1925 	/* Now set up the stations */
1926 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1927 		int cpu_id = i;
1928 		rid = vector + 1;
1929 		txr = &que->txr;
1930 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1931 		    RF_SHAREABLE | RF_ACTIVE);
1932 		if (que->res == NULL) {
1933 			device_printf(dev,"Unable to allocate"
1934 		    " bus resource: que interrupt [%d]\n", vector);
1935 			return (ENXIO);
1936 		}
1937 		/* Set the handler function */
1938 		error = bus_setup_intr(dev, que->res,
1939 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1940 		    ixl_msix_que, que, &que->tag);
1941 		if (error) {
1942 			que->res = NULL;
1943 			device_printf(dev, "Failed to register que handler");
1944 			return (error);
1945 		}
1946 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1947 		/* Bind the vector to a CPU */
1948 #ifdef RSS
1949 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
1950 #endif
1951 		bus_bind_intr(dev, que->res, cpu_id);
1952 		que->msix = vector;
1953 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1954 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1955 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1956 		    taskqueue_thread_enqueue, &que->tq);
1957 #ifdef RSS
1958 		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1959 		    cpu_id, "%s (bucket %d)",
1960 		    device_get_nameunit(dev), cpu_id);
1961 #else
1962 		taskqueue_start_threads(&que->tq, 1, PI_NET,
1963 		    "%s que", device_get_nameunit(dev));
1964 #endif
1965 	}
1966 
1967 	return (0);
1968 }
1969 
1970 
1971 /*
1972  * Allocate MSI/X vectors
1973  */
1974 static int
1975 ixl_init_msix(struct ixl_pf *pf)
1976 {
1977 	device_t dev = pf->dev;
1978 	int rid, want, vectors, queues, available;
1979 
1980 	/* Override by tuneable */
1981 	if (ixl_enable_msix == 0)
1982 		goto msi;
1983 
1984 	/*
1985 	** When used in a virtualized environment the
1986 	** PCI BUSMASTER capability may not be set, so
1987 	** explicitly set it here, and rewrite the
1988 	** ENABLE bit in the MSIX control register at
1989 	** this point so that the host can initialize
1990 	** us successfully.
1991 	*/
1992 	{
1993 		u16 pci_cmd_word;
1994 		int msix_ctrl;
1995 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1996 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1997 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1998 		pci_find_cap(dev, PCIY_MSIX, &rid);
1999 		rid += PCIR_MSIX_CTRL;
2000 		msix_ctrl = pci_read_config(dev, rid, 2);
2001 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2002 		pci_write_config(dev, rid, msix_ctrl, 2);
2003 	}
2004 
2005 	/* First try MSI/X */
2006 	rid = PCIR_BAR(IXL_BAR);
2007 	pf->msix_mem = bus_alloc_resource_any(dev,
2008 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2009 	if (!pf->msix_mem) {
2010 		/* May not be enabled */
2011 		device_printf(pf->dev,
2012 		    "Unable to map MSIX table\n");
2013 		goto msi;
2014 	}
2015 
2016 	available = pci_msix_count(dev);
2017 	if (available == 0) { /* system has msix disabled */
2018 		bus_release_resource(dev, SYS_RES_MEMORY,
2019 		    rid, pf->msix_mem);
2020 		pf->msix_mem = NULL;
2021 		goto msi;
2022 	}
2023 
2024 	/* Figure out a reasonable auto config value */
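	/* (one vector is reserved for the admin queue, hence available - 1) */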
2025 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2026 
2027 	/* Override with hardcoded value if sane */
2028 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2029 		queues = ixl_max_queues;
2030 
2031 #ifdef  RSS
2032 	/* If we're doing RSS, clamp at the number of RSS buckets */
2033 	if (queues > rss_getnumbuckets())
2034 		queues = rss_getnumbuckets();
2035 #endif
2036 
2037 	/*
2038 	** Want one vector (RX/TX pair) per queue
2039 	** plus an additional for the admin queue.
2040 	*/
2041 	want = queues + 1;
2042 	if (want <= available)	/* Have enough */
2043 		vectors = want;
2044 	else {
2045 		device_printf(pf->dev,
2046 		    "MSIX Configuration Problem, "
2047 		    "%d vectors available but %d wanted!\n",
2048 		    available, want);
2049 		return (0); /* Will go to Legacy setup */
2050 	}
2051 
2052 	if (pci_alloc_msix(dev, &vectors) == 0) {
2053 		device_printf(pf->dev,
2054 		    "Using MSIX interrupts with %d vectors\n", vectors);
2055 		pf->msix = vectors;
2056 		pf->vsi.num_queues = queues;
2057 #ifdef RSS
2058 		/*
2059 		 * If we're doing RSS, the number of queues needs to
2060 		 * match the number of RSS buckets that are configured.
2061 		 *
2062 		 * + If there's more queues than RSS buckets, we'll end
2063 		 *   up with queues that get no traffic.
2064 		 *
2065 		 * + If there's more RSS buckets than queues, we'll end
2066 		 *   up having multiple RSS buckets map to the same queue,
2067 		 *   so there'll be some contention.
2068 		 */
2069 		if (queues != rss_getnumbuckets()) {
2070 			device_printf(dev,
2071 			    "%s: queues (%d) != RSS buckets (%d)"
2072 			    "; performance will be impacted.\n",
2073 			    __func__, queues, rss_getnumbuckets());
2074 		}
2075 #endif
2076 		return (vectors);
2077 	}
2078 msi:
2079 	vectors = pci_msi_count(dev);
2080 	pf->vsi.num_queues = 1;
2081 	pf->msix = 1;
2082 	ixl_max_queues = 1;
2083 	ixl_enable_msix = 0;
2084 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2085 		device_printf(pf->dev, "Using an MSI interrupt\n");
2086 	else {
2087 		pf->msix = 0;
2088 		device_printf(pf->dev, "Using a Legacy interrupt\n");
2089 	}
2090 	return (vectors);
2091 }
2092 
2093 
2094 /*
2095  * Plumb MSI/X vectors
2096  */
2097 static void
2098 ixl_configure_msix(struct ixl_pf *pf)
2099 {
2100 	struct i40e_hw	*hw = &pf->hw;
2101 	struct ixl_vsi *vsi = &pf->vsi;
2102 	u32		reg;
2103 	u16		vector = 1;
2104 
2105 	/* First set up the adminq - vector 0 */
2106 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2107 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2108 
2109 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2110 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2111 	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2112 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2113 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2114 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2115 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2116 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2117 
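	/* Vector 0 has no queue causes: 0x7FF marks an empty (end-of-list) chain */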
2118 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2119 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2120 
2121 	wr32(hw, I40E_PFINT_DYN_CTL0,
2122 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2123 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2124 
2125 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2126 
2127 	/* Next configure the queues */
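	/*
	** Interrupt causes are chained per queue pair: the RX cause for
	** queue i points at TX queue i, whose cause points at RX queue
	** i + 1; the chain of the last TX queue ends with IXL_QUEUE_EOL.
	*/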
2128 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2129 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2130 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2131 
2132 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2133 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2134 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2135 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2136 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2137 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2138 
2139 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2140 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2141 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2142 		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2143 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2144 		if (i == (vsi->num_queues - 1))
2145 			reg |= (IXL_QUEUE_EOL
2146 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2147 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2148 	}
2149 }
2150 
2151 /*
2152  * Configure for MSI single vector operation
2153  */
2154 static void
2155 ixl_configure_legacy(struct ixl_pf *pf)
2156 {
2157 	struct i40e_hw	*hw = &pf->hw;
2158 	u32		reg;
2159 
2160 
2161 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2162 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2163 
2164 
2165 	/* Setup "other" causes */
2166 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2167 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2168 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2169 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2170 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2171 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2172 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2173 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2174 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2175 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2176 	    ;
2177 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2178 
2179 	/* SW_ITR_IDX = 0, but don't change INTENA */
2180 	wr32(hw, I40E_PFINT_DYN_CTL0,
2181 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2182 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2183 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2184 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2185 
2186 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2187 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2188 
2189 	/* Associate the queue pair to the vector and enable the q int */
2190 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2191 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2192 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2193 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2194 
2195 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2196 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2197 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2198 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2199 
2200 	/* Next enable the queue pair */
2201 	reg = rd32(hw, I40E_QTX_ENA(0));
2202 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2203 	wr32(hw, I40E_QTX_ENA(0), reg);
2204 
2205 	reg = rd32(hw, I40E_QRX_ENA(0));
2206 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2207 	wr32(hw, I40E_QRX_ENA(0), reg);
2208 }
2209 
2210 
2211 /*
2212  * Set the Initial ITR state
2213  */
2214 static void
2215 ixl_configure_itr(struct ixl_pf *pf)
2216 {
2217 	struct i40e_hw		*hw = &pf->hw;
2218 	struct ixl_vsi		*vsi = &pf->vsi;
2219 	struct ixl_queue	*que = vsi->queues;
2220 
2221 	vsi->rx_itr_setting = ixl_rx_itr;
2222 	if (ixl_dynamic_rx_itr)
2223 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2224 	vsi->tx_itr_setting = ixl_tx_itr;
2225 	if (ixl_dynamic_tx_itr)
2226 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2227 
2228 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2229 		struct tx_ring	*txr = &que->txr;
2230 		struct rx_ring 	*rxr = &que->rxr;
2231 
2232 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2233 		    vsi->rx_itr_setting);
2234 		rxr->itr = vsi->rx_itr_setting;
2235 		rxr->latency = IXL_AVE_LATENCY;
2236 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2237 		    vsi->tx_itr_setting);
2238 		txr->itr = vsi->tx_itr_setting;
2239 		txr->latency = IXL_AVE_LATENCY;
2240 	}
2241 }
2242 
2243 
2244 static int
2245 ixl_allocate_pci_resources(struct ixl_pf *pf)
2246 {
2247 	int             rid;
2248 	device_t        dev = pf->dev;
2249 
2250 	rid = PCIR_BAR(0);
2251 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2252 	    &rid, RF_ACTIVE);
2253 
2254 	if (!(pf->pci_mem)) {
2255 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2256 		return (ENXIO);
2257 	}
2258 
2259 	pf->osdep.mem_bus_space_tag =
2260 		rman_get_bustag(pf->pci_mem);
2261 	pf->osdep.mem_bus_space_handle =
2262 		rman_get_bushandle(pf->pci_mem);
2263 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2264 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
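	/*
	** Note: hw_addr holds a pointer to the bus space handle, not a
	** direct register mapping; register access in this driver goes
	** through the bus space tag/handle kept in the osdep structure.
	*/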
2265 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2266 
2267 	pf->hw.back = &pf->osdep;
2268 
2269 	/*
2270 	** Now set up MSI or MSI/X; this should
2271 	** return the number of supported
2272 	** vectors (it will be 1 for MSI).
2273 	*/
2274 	pf->msix = ixl_init_msix(pf);
2275 	return (0);
2276 }
2277 
2278 static void
2279 ixl_free_pci_resources(struct ixl_pf * pf)
2280 {
2281 	struct ixl_vsi		*vsi = &pf->vsi;
2282 	struct ixl_queue	*que = vsi->queues;
2283 	device_t		dev = pf->dev;
2284 	int			rid, memrid;
2285 
2286 	memrid = PCIR_BAR(IXL_BAR);
2287 
2288 	/* We may get here before stations are setup */
2289 	if ((!ixl_enable_msix) || (que == NULL))
2290 		goto early;
2291 
2292 	/*
2293 	**  Release all msix VSI resources:
2294 	*/
2295 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2296 		rid = que->msix + 1;
2297 		if (que->tag != NULL) {
2298 			bus_teardown_intr(dev, que->res, que->tag);
2299 			que->tag = NULL;
2300 		}
2301 		if (que->res != NULL)
2302 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2303 	}
2304 
2305 early:
2306 	/* Clean the AdminQ interrupt last */
2307 	if (pf->admvec) /* we are doing MSIX */
2308 		rid = pf->admvec + 1;
2309 	else
2310 		rid = (pf->msix != 0) ? 1 : 0;
2311 
2312 	if (pf->tag != NULL) {
2313 		bus_teardown_intr(dev, pf->res, pf->tag);
2314 		pf->tag = NULL;
2315 	}
2316 	if (pf->res != NULL)
2317 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2318 
2319 	if (pf->msix)
2320 		pci_release_msi(dev);
2321 
2322 	if (pf->msix_mem != NULL)
2323 		bus_release_resource(dev, SYS_RES_MEMORY,
2324 		    memrid, pf->msix_mem);
2325 
2326 	if (pf->pci_mem != NULL)
2327 		bus_release_resource(dev, SYS_RES_MEMORY,
2328 		    PCIR_BAR(0), pf->pci_mem);
2329 
2330 	return;
2331 }
2332 
2333 static void
2334 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2335 {
2336 	/* Display supported media types */
2337 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2338 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2339 
2340 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2341 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2342 
2343 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2344 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2345 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2346 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2347 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2348 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2349 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2350 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2351 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2352 
2353 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2354 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2355 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2356 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2357 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2358 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2359 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2360 }
2361 
2362 /*********************************************************************
2363  *
2364  *  Setup networking device structure and register an interface.
2365  *
2366  **********************************************************************/
2367 static int
2368 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2369 {
2370 	struct ifnet		*ifp;
2371 	struct i40e_hw		*hw = vsi->hw;
2372 	struct ixl_queue	*que = vsi->queues;
2373 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2374 	enum i40e_status_code aq_error = 0;
2375 
2376 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2377 
2378 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2379 	if (ifp == NULL) {
2380 		device_printf(dev, "can not allocate ifnet structure\n");
2381 		return (-1);
2382 	}
2383 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2384 	ifp->if_mtu = ETHERMTU;
2385 	ifp->if_baudrate = 4000000000;  // ??
2386 	ifp->if_init = ixl_init;
2387 	ifp->if_softc = vsi;
2388 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2389 	ifp->if_ioctl = ixl_ioctl;
2390 
2391 #if __FreeBSD_version >= 1100036
2392 	if_setgetcounterfn(ifp, ixl_get_counter);
2393 #endif
2394 
2395 	ifp->if_transmit = ixl_mq_start;
2396 
2397 	ifp->if_qflush = ixl_qflush;
2398 
2399 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2400 
2401 	vsi->max_frame_size =
2402 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2403 	    + ETHER_VLAN_ENCAP_LEN;
2404 
2405 	/*
2406 	 * Tell the upper layer(s) we support long frames.
2407 	 */
2408 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2409 
2410 	ifp->if_capabilities |= IFCAP_HWCSUM;
2411 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2412 	ifp->if_capabilities |= IFCAP_TSO;
2413 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2414 	ifp->if_capabilities |= IFCAP_LRO;
2415 
2416 	/* VLAN capabilities */
2417 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2418 			     |  IFCAP_VLAN_HWTSO
2419 			     |  IFCAP_VLAN_MTU
2420 			     |  IFCAP_VLAN_HWCSUM;
2421 	ifp->if_capenable = ifp->if_capabilities;
2422 
2423 	/*
2424 	** Don't enable this by default: if vlans are
2425 	** created on another pseudo device (e.g. lagg)
2426 	** then vlan events are not passed through, breaking
2427 	** operation, but with HW FILTER off it works. If
2428 	** using vlans directly on the ixl driver you can
2429 	** enable this and get full hardware tag filtering.
2430 	*/
2431 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2432 
2433 	/*
2434 	 * Specify the media types supported by this adapter and register
2435 	 * callbacks to update media and link information
2436 	 */
2437 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2438 		     ixl_media_status);
2439 
2440 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2441 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2442 		/* Need delay to detect fiber correctly */
2443 		i40e_msec_delay(200);
2444 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2445 		    TRUE, &abilities_resp, NULL);
2446 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2447 			device_printf(dev, "Unknown PHY type detected!\n");
2448 		else
2449 			ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2450 	} else if (aq_error) {
2451 		device_printf(dev, "Error getting supported media types, err %d,"
2452 		    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2453 	} else
2454 		ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2455 
2456 	/* Use autoselect media by default */
2457 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2458 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2459 
2460 	ether_ifattach(ifp, hw->mac.addr);
2461 
2462 	return (0);
2463 }
2464 
2465 static bool
2466 ixl_config_link(struct i40e_hw *hw)
2467 {
2468 	bool check;
2469 
2470 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2471 	check = i40e_get_link_status(hw);
2472 #ifdef IXL_DEBUG
2473 	printf("Link is %s\n", check ? "up":"down");
2474 #endif
2475 	return (check);
2476 }
2477 
2478 /*********************************************************************
2479  *
2480  *  Initialize this VSI
2481  *
2482  **********************************************************************/
2483 static int
2484 ixl_setup_vsi(struct ixl_vsi *vsi)
2485 {
2486 	struct i40e_hw	*hw = vsi->hw;
2487 	device_t 	dev = vsi->dev;
2488 	struct i40e_aqc_get_switch_config_resp *sw_config;
2489 	struct i40e_vsi_context	ctxt;
2490 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2491 	int	ret = I40E_SUCCESS;
2492 	u16	next = 0;
2493 
2494 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2495 	ret = i40e_aq_get_switch_config(hw, sw_config,
2496 	    sizeof(aq_buf), &next, NULL);
2497 	if (ret) {
2498 		device_printf(dev,"aq_get_switch_config failed!!\n");
2499 		return (ret);
2500 	}
2501 #ifdef IXL_DEBUG
2502 	printf("Switch config: header reported: %d in structure, %d total\n",
2503     	    sw_config->header.num_reported, sw_config->header.num_total);
2504 	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2505 	    sw_config->element[0].element_type,
2506 	    sw_config->element[0].seid,
2507 	    sw_config->element[0].uplink_seid,
2508 	    sw_config->element[0].downlink_seid);
2509 #endif
2510 	/* Save off the switch element ID (SEID) for this VSI */
2511 	vsi->seid = sw_config->element[0].seid;
2512 
2513 	memset(&ctxt, 0, sizeof(ctxt));
2514 	ctxt.seid = vsi->seid;
2515 	ctxt.pf_num = hw->pf_id;
2516 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2517 	if (ret) {
2518 		device_printf(dev,"get vsi params failed %x!!\n", ret);
2519 		return (ret);
2520 	}
2521 #ifdef IXL_DEBUG
2522 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2523 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2524 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2525 	    ctxt.uplink_seid, ctxt.vsi_number,
2526 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2527 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2528 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2529 #endif
2530 	/*
2531 	** Set the queue and traffic class bits
2532 	**  - when multiple traffic classes are supported
2533 	**    this will need to be more robust.
2534 	*/
2535 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2536 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2537 	ctxt.info.queue_mapping[0] = 0;
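	/*
	** 0x0800 appears to encode TC0 with queue offset 0 and a
	** queue-count exponent of 4 (2^4 = 16 queue pairs), per the
	** AQ VSI TC mapping layout.
	*/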
2538 	ctxt.info.tc_mapping[0] = 0x0800;
2539 
2540 	/* Set VLAN receive stripping mode */
2541 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2542 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2543 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2544 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2545 	else
2546 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2547 
2548 	/* Keep copy of VSI info in VSI for statistic counters */
2549 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2550 
2551 	/* Reset VSI statistics */
2552 	ixl_vsi_reset_stats(vsi);
2553 	vsi->hw_filters_add = 0;
2554 	vsi->hw_filters_del = 0;
2555 
2556 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2557 	if (ret)
2558 		device_printf(dev,"update vsi params failed %x!!\n",
2559 		   hw->aq.asq_last_status);
2560 	return (ret);
2561 }
2562 
2563 
2564 /*********************************************************************
2565  *
2566  *  Initialize the VSI:  this handles contexts, which means things
2567  *  			 like the number of descriptors, buffer size,
2568  *			 plus we initialize the rings through this function.
2569  *
2570  **********************************************************************/
2571 static int
2572 ixl_initialize_vsi(struct ixl_vsi *vsi)
2573 {
2574 	struct ixl_queue	*que = vsi->queues;
2575 	device_t		dev = vsi->dev;
2576 	struct i40e_hw		*hw = vsi->hw;
2577 	int			err = 0;
2578 
2579 
2580 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2581 		struct tx_ring		*txr = &que->txr;
2582 		struct rx_ring 		*rxr = &que->rxr;
2583 		struct i40e_hmc_obj_txq tctx;
2584 		struct i40e_hmc_obj_rxq rctx;
2585 		u32			txctl;
2586 		u16			size;
2587 
2588 
2589 		/* Setup the HMC TX Context  */
2590 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2591 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2592 		tctx.new_context = 1;
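		/* the queue context base address is programmed in 128-byte units */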
2593 		tctx.base = (txr->dma.pa/128);
2594 		tctx.qlen = que->num_desc;
2595 		tctx.fc_ena = 0;
2596 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2597 		/* Enable HEAD writeback */
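		/*
		** With head writeback the hardware DMAs the TX head index
		** into the word just past the descriptor ring, so cleanup
		** can poll memory instead of reading a register.
		*/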
2598 		tctx.head_wb_ena = 1;
2599 		tctx.head_wb_addr = txr->dma.pa +
2600 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2601 		tctx.rdylist_act = 0;
2602 		err = i40e_clear_lan_tx_queue_context(hw, i);
2603 		if (err) {
2604 			device_printf(dev, "Unable to clear TX context\n");
2605 			break;
2606 		}
2607 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2608 		if (err) {
2609 			device_printf(dev, "Unable to set TX context\n");
2610 			break;
2611 		}
2612 		/* Associate the ring with this PF */
2613 		txctl = I40E_QTX_CTL_PF_QUEUE;
2614 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2615 		    I40E_QTX_CTL_PF_INDX_MASK);
2616 		wr32(hw, I40E_QTX_CTL(i), txctl);
2617 		ixl_flush(hw);
2618 
2619 		/* Do ring (re)init */
2620 		ixl_init_tx_ring(que);
2621 
2622 		/* Next setup the HMC RX Context  */
2623 		if (vsi->max_frame_size <= 2048)
2624 			rxr->mbuf_sz = MCLBYTES;
2625 		else
2626 			rxr->mbuf_sz = MJUMPAGESIZE;
2627 
2628 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2629 
2630 		/* Set up an RX context for the HMC */
2631 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2632 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2633 		/* ignore header split for now */
2634 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2635 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2636 		    vsi->max_frame_size : max_rxmax;
2637 		rctx.dtype = 0;
2638 		rctx.dsize = 1;	/* use 32-byte descriptors */
2639 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2640 		rctx.base = (rxr->dma.pa/128);
2641 		rctx.qlen = que->num_desc;
2642 		rctx.tphrdesc_ena = 1;
2643 		rctx.tphwdesc_ena = 1;
2644 		rctx.tphdata_ena = 0;
2645 		rctx.tphhead_ena = 0;
2646 		rctx.lrxqthresh = 2;
2647 		rctx.crcstrip = 1;
2648 		rctx.l2tsel = 1;
2649 		rctx.showiv = 1;
2650 		rctx.fc_ena = 0;
2651 		rctx.prefena = 1;
2652 
2653 		err = i40e_clear_lan_rx_queue_context(hw, i);
2654 		if (err) {
2655 			device_printf(dev,
2656 			    "Unable to clear RX context %d\n", i);
2657 			break;
2658 		}
2659 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2660 		if (err) {
2661 			device_printf(dev, "Unable to set RX context %d\n", i);
2662 			break;
2663 		}
2664 		err = ixl_init_rx_ring(que);
2665 		if (err) {
2666 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2667 			break;
2668 		}
2669 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2670 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2671 	}
2672 	return (err);
2673 }
2674 
2675 
2676 /*********************************************************************
2677  *
2678  *  Free all VSI structs.
2679  *
2680  **********************************************************************/
2681 void
2682 ixl_free_vsi(struct ixl_vsi *vsi)
2683 {
2684 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2685 	struct ixl_queue	*que = vsi->queues;
2686 	struct ixl_mac_filter *f;
2687 
2688 	/* Free station queues */
2689 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2690 		struct tx_ring *txr = &que->txr;
2691 		struct rx_ring *rxr = &que->rxr;
2692 
2693 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2694 			continue;
2695 		IXL_TX_LOCK(txr);
2696 		ixl_free_que_tx(que);
2697 		if (txr->base)
2698 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2699 		IXL_TX_UNLOCK(txr);
2700 		IXL_TX_LOCK_DESTROY(txr);
2701 
2702 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2703 			continue;
2704 		IXL_RX_LOCK(rxr);
2705 		ixl_free_que_rx(que);
2706 		if (rxr->base)
2707 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2708 		IXL_RX_UNLOCK(rxr);
2709 		IXL_RX_LOCK_DESTROY(rxr);
2710 
2711 	}
2712 	free(vsi->queues, M_DEVBUF);
2713 
2714 	/* Free VSI filter list */
2715 	while (!SLIST_EMPTY(&vsi->ftl)) {
2716 		f = SLIST_FIRST(&vsi->ftl);
2717 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2718 		free(f, M_DEVBUF);
2719 	}
2720 }
2721 
2722 
2723 /*********************************************************************
2724  *
2725  *  Allocate memory for the VSI (virtual station interface) and its
2726  *  associated queues, rings and the descriptors for each,
2727  *  called only once at attach.
2728  *
2729  **********************************************************************/
2730 static int
2731 ixl_setup_stations(struct ixl_pf *pf)
2732 {
2733 	device_t		dev = pf->dev;
2734 	struct ixl_vsi		*vsi;
2735 	struct ixl_queue	*que;
2736 	struct tx_ring		*txr;
2737 	struct rx_ring		*rxr;
2738 	int 			rsize, tsize;
2739 	int			error = I40E_SUCCESS;
2740 
2741 	vsi = &pf->vsi;
2742 	vsi->back = (void *)pf;
2743 	vsi->hw = &pf->hw;
2744 	vsi->id = 0;
2745 	vsi->num_vlans = 0;
2746 
2747 	/* Get memory for the station queues */
2748 	if (!(vsi->queues =
2749 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2750 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2751 		device_printf(dev, "Unable to allocate queue memory\n");
2752 		error = ENOMEM;
2753 		goto early;
2754 	}
2755 
2756 	for (int i = 0; i < vsi->num_queues; i++) {
2757 		que = &vsi->queues[i];
2758 		que->num_desc = ixl_ringsz;
2759 		que->me = i;
2760 		que->vsi = vsi;
2761 		/* mark the queue as active */
2762 		vsi->active_queues |= (u64)1 << que->me;
2763 		txr = &que->txr;
2764 		txr->que = que;
2765 		txr->tail = I40E_QTX_TAIL(que->me);
2766 
2767 		/* Initialize the TX lock */
2768 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2769 		    device_get_nameunit(dev), que->me);
2770 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2771 		/* Create the TX descriptor ring */
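		/* (the extra u32 makes room for the head-writeback word) */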
2772 		tsize = roundup2((que->num_desc *
2773 		    sizeof(struct i40e_tx_desc)) +
2774 		    sizeof(u32), DBA_ALIGN);
2775 		if (i40e_allocate_dma_mem(&pf->hw,
2776 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2777 			device_printf(dev,
2778 			    "Unable to allocate TX Descriptor memory\n");
2779 			error = ENOMEM;
2780 			goto fail;
2781 		}
2782 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2783 		bzero((void *)txr->base, tsize);
2784 		/* Now allocate transmit soft structs for the ring */
2785 		if (ixl_allocate_tx_data(que)) {
2786 			device_printf(dev,
2787 			    "Critical Failure setting up TX structures\n");
2788 			error = ENOMEM;
2789 			goto fail;
2790 		}
2791 		/* Allocate a buf ring */
2792 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2793 		    M_WAITOK, &txr->mtx);
2794 		if (txr->br == NULL) {
2795 			device_printf(dev,
2796 			    "Critical Failure setting up TX buf ring\n");
2797 			error = ENOMEM;
2798 			goto fail;
2799 		}
2800 
2801 		/*
2802 		 * Next the RX queues...
2803 		 */
2804 		rsize = roundup2(que->num_desc *
2805 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2806 		rxr = &que->rxr;
2807 		rxr->que = que;
2808 		rxr->tail = I40E_QRX_TAIL(que->me);
2809 
2810 		/* Initialize the RX side lock */
2811 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2812 		    device_get_nameunit(dev), que->me);
2813 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2814 
2815 		if (i40e_allocate_dma_mem(&pf->hw,
2816 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2817 			device_printf(dev,
2818 			    "Unable to allocate RX Descriptor memory\n");
2819 			error = ENOMEM;
2820 			goto fail;
2821 		}
2822 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2823 		bzero((void *)rxr->base, rsize);
2824 
2825 		/* Allocate receive soft structs for the ring */
2826 		if (ixl_allocate_rx_data(que)) {
2827 			device_printf(dev,
2828 			    "Critical Failure setting up receive structs\n");
2829 			error = ENOMEM;
2830 			goto fail;
2831 		}
2832 	}
2833 
2834 	return (0);
2835 
2836 fail:
2837 	for (int i = 0; i < vsi->num_queues; i++) {
2838 		que = &vsi->queues[i];
2839 		rxr = &que->rxr;
2840 		txr = &que->txr;
2841 		if (rxr->base)
2842 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2843 		if (txr->base)
2844 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2845 	}
2846 
2847 early:
2848 	return (error);
2849 }
2850 
2851 /*
2852 ** Provide an update to the queue RX
2853 ** interrupt moderation value.
2854 */
2855 static void
2856 ixl_set_queue_rx_itr(struct ixl_queue *que)
2857 {
2858 	struct ixl_vsi	*vsi = que->vsi;
2859 	struct i40e_hw	*hw = vsi->hw;
2860 	struct rx_ring	*rxr = &que->rxr;
2861 	u16		rx_itr;
2862 	u16		rx_latency = 0;
2863 	int		rx_bytes;
2864 
2865 
2866 	/* Idle, do nothing */
2867 	if (rxr->bytes == 0)
2868 		return;
2869 
2870 	if (ixl_dynamic_rx_itr) {
2871 		rx_bytes = rxr->bytes/rxr->itr;
2872 		rx_itr = rxr->itr;
2873 
2874 		/* Adjust latency range */
2875 		switch (rxr->latency) {
2876 		case IXL_LOW_LATENCY:
2877 			if (rx_bytes > 10) {
2878 				rx_latency = IXL_AVE_LATENCY;
2879 				rx_itr = IXL_ITR_20K;
2880 			}
2881 			break;
2882 		case IXL_AVE_LATENCY:
2883 			if (rx_bytes > 20) {
2884 				rx_latency = IXL_BULK_LATENCY;
2885 				rx_itr = IXL_ITR_8K;
2886 			} else if (rx_bytes <= 10) {
2887 				rx_latency = IXL_LOW_LATENCY;
2888 				rx_itr = IXL_ITR_100K;
2889 			}
2890 			break;
2891 		case IXL_BULK_LATENCY:
2892 			if (rx_bytes <= 20) {
2893 				rx_latency = IXL_AVE_LATENCY;
2894 				rx_itr = IXL_ITR_20K;
2895 			}
2896 			break;
2897 		}
2898 
2899 		rxr->latency = rx_latency;
2900 
2901 		if (rx_itr != rxr->itr) {
2902 			/* do an exponential smoothing */
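			/*
			** Equivalent to 1/new = 0.9*(1/old) + 0.1*(1/target):
			** an exponentially weighted average of the interrupt
			** rate, biased 9:1 toward the current value.
			*/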
2903 			rx_itr = (10 * rx_itr * rxr->itr) /
2904 			    ((9 * rx_itr) + rxr->itr);
2905 			rxr->itr = rx_itr & IXL_MAX_ITR;
2906 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2907 			    que->me), rxr->itr);
2908 		}
2909 	} else { /* We may have toggled to non-dynamic */
2910 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2911 			vsi->rx_itr_setting = ixl_rx_itr;
2912 		/* Update the hardware if needed */
2913 		if (rxr->itr != vsi->rx_itr_setting) {
2914 			rxr->itr = vsi->rx_itr_setting;
2915 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2916 			    que->me), rxr->itr);
2917 		}
2918 	}
2919 	rxr->bytes = 0;
2920 	rxr->packets = 0;
2921 	return;
2922 }
2923 
2924 
2925 /*
2926 ** Provide an update to the queue TX
2927 ** interrupt moderation value.
2928 */
2929 static void
2930 ixl_set_queue_tx_itr(struct ixl_queue *que)
2931 {
2932 	struct ixl_vsi	*vsi = que->vsi;
2933 	struct i40e_hw	*hw = vsi->hw;
2934 	struct tx_ring	*txr = &que->txr;
2935 	u16		tx_itr;
2936 	u16		tx_latency = 0;
2937 	int		tx_bytes;
2938 
2939 
2940 	/* Idle, do nothing */
2941 	if (txr->bytes == 0)
2942 		return;
2943 
2944 	if (ixl_dynamic_tx_itr) {
2945 		tx_bytes = txr->bytes/txr->itr;
2946 		tx_itr = txr->itr;
2947 
2948 		switch (txr->latency) {
2949 		case IXL_LOW_LATENCY:
2950 			if (tx_bytes > 10) {
2951 				tx_latency = IXL_AVE_LATENCY;
2952 				tx_itr = IXL_ITR_20K;
2953 			}
2954 			break;
2955 		case IXL_AVE_LATENCY:
2956 			if (tx_bytes > 20) {
2957 				tx_latency = IXL_BULK_LATENCY;
2958 				tx_itr = IXL_ITR_8K;
2959 			} else if (tx_bytes <= 10) {
2960 				tx_latency = IXL_LOW_LATENCY;
2961 				tx_itr = IXL_ITR_100K;
2962 			}
2963 			break;
2964 		case IXL_BULK_LATENCY:
2965 			if (tx_bytes <= 20) {
2966 				tx_latency = IXL_AVE_LATENCY;
2967 				tx_itr = IXL_ITR_20K;
2968 			}
2969 			break;
2970 		}
2971 
2972 		txr->latency = tx_latency;
2973 
2974 		if (tx_itr != txr->itr) {
2975 			/* do an exponential smoothing */
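			/* (same rate-average as in ixl_set_queue_rx_itr above) */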
2976 			tx_itr = (10 * tx_itr * txr->itr) /
2977 			    ((9 * tx_itr) + txr->itr);
2978 			txr->itr = tx_itr & IXL_MAX_ITR;
2979 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2980 			    que->me), txr->itr);
2981 		}
2982 
2983 	} else { /* We may have toggled to non-dynamic */
2984 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2985 			vsi->tx_itr_setting = ixl_tx_itr;
2986 		/* Update the hardware if needed */
2987 		if (txr->itr != vsi->tx_itr_setting) {
2988 			txr->itr = vsi->tx_itr_setting;
2989 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2990 			    que->me), txr->itr);
2991 		}
2992 	}
2993 	txr->bytes = 0;
2994 	txr->packets = 0;
2995 	return;
2996 }
2997 
2998 
2999 static void
3000 ixl_add_hw_stats(struct ixl_pf *pf)
3001 {
3002 	device_t dev = pf->dev;
3003 	struct ixl_vsi *vsi = &pf->vsi;
3004 	struct ixl_queue *queues = vsi->queues;
3005 	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3006 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3007 
3008 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3009 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3010 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3011 
3012 	struct sysctl_oid *vsi_node, *queue_node;
3013 	struct sysctl_oid_list *vsi_list, *queue_list;
3014 
3015 	struct tx_ring *txr;
3016 	struct rx_ring *rxr;
3017 
3018 	/* Driver statistics */
3019 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3020 			CTLFLAG_RD, &pf->watchdog_events,
3021 			"Watchdog timeouts");
3022 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3023 			CTLFLAG_RD, &pf->admin_irq,
3024 			"Admin Queue IRQ Handled");
3025 
3026 	/* VSI statistics */
3027 #define QUEUE_NAME_LEN 32
3028 	char queue_namebuf[QUEUE_NAME_LEN];
3029 
3030 	// ERJ: Only one vsi now, re-do when >1 VSI enabled
3031 	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3032 	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3033 				   CTLFLAG_RD, NULL, "VSI-specific stats");
3034 	vsi_list = SYSCTL_CHILDREN(vsi_node);
3035 
3036 	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3037 
3038 	/* Queue statistics */
3039 	for (int q = 0; q < vsi->num_queues; q++) {
3040 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3041 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3042 					     CTLFLAG_RD, NULL, "Queue #");
3043 		queue_list = SYSCTL_CHILDREN(queue_node);
3044 
3045 		txr = &(queues[q].txr);
3046 		rxr = &(queues[q].rxr);
3047 
3048 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3049 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3050 				"m_defrag() failed");
3051 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3052 				CTLFLAG_RD, &(queues[q].dropped_pkts),
3053 				"Driver dropped packets");
3054 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3055 				CTLFLAG_RD, &(queues[q].irqs),
3056 				"irqs on this queue");
3057 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3058 				CTLFLAG_RD, &(queues[q].tso),
3059 				"TSO");
3060 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3061 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3062 				"Driver tx dma failure in xmit");
3063 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3064 				CTLFLAG_RD, &(txr->no_desc),
3065 				"Queue No Descriptor Available");
3066 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3067 				CTLFLAG_RD, &(txr->total_packets),
3068 				"Queue Packets Transmitted");
3069 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3070 				CTLFLAG_RD, &(txr->tx_bytes),
3071 				"Queue Bytes Transmitted");
3072 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3073 				CTLFLAG_RD, &(rxr->rx_packets),
3074 				"Queue Packets Received");
3075 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3076 				CTLFLAG_RD, &(rxr->rx_bytes),
3077 				"Queue Bytes Received");
3078 	}
3079 
3080 	/* MAC stats */
3081 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3082 }
3083 
3084 static void
3085 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3086 	struct sysctl_oid_list *child,
3087 	struct i40e_eth_stats *eth_stats)
3088 {
3089 	struct ixl_sysctl_info ctls[] =
3090 	{
3091 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3092 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3093 			"Unicast Packets Received"},
3094 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3095 			"Multicast Packets Received"},
3096 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3097 			"Broadcast Packets Received"},
3098 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3099 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3100 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3101 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3102 			"Multicast Packets Transmitted"},
3103 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3104 			"Broadcast Packets Transmitted"},
3105 		// end
3106 		{0,0,0}
3107 	};
3108 
3109 	struct ixl_sysctl_info *entry = ctls;
3110 	while (entry->stat != 0)
3111 	{
3112 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3113 				CTLFLAG_RD, entry->stat,
3114 				entry->description);
3115 		entry++;
3116 	}
3117 }
3118 
3119 static void
3120 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3121 	struct sysctl_oid_list *child,
3122 	struct i40e_hw_port_stats *stats)
3123 {
3124 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3125 				    CTLFLAG_RD, NULL, "MAC Statistics");
3126 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3127 
3128 	struct i40e_eth_stats *eth_stats = &stats->eth;
3129 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3130 
3131 	struct ixl_sysctl_info ctls[] =
3132 	{
3133 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3134 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3135 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3136 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3137 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3138 		/* Packet Reception Stats */
3139 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3140 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3141 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3142 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3143 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3144 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3145 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3146 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3147 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3148 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3149 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3150 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3151 		/* Packet Transmission Stats */
3152 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3153 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3154 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3155 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3156 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3157 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3158 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3159 		/* Flow control */
3160 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3161 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3162 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3163 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3164 		/* End */
3165 		{0,0,0}
3166 	};
3167 
3168 	struct ixl_sysctl_info *entry = ctls;
3169 	while (entry->stat != 0)
3170 	{
3171 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3172 				CTLFLAG_RD, entry->stat,
3173 				entry->description);
3174 		entry++;
3175 	}
3176 }
3177 
3178 /*
3179 ** ixl_config_rss - setup RSS
3180 **  - note this is done for the single vsi
3181 */
3182 static void ixl_config_rss(struct ixl_vsi *vsi)
3183 {
3184 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3185 	struct i40e_hw	*hw = vsi->hw;
3186 	u32		lut = 0;
3187 	u64		set_hena = 0, hena;
3188 	int		i, j, que_id;
3189 #ifdef RSS
3190 	u32		rss_hash_config;
3191 	u32		rss_seed[IXL_KEYSZ];
3192 #else
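	/* fixed default hash key, used when the kernel RSS option is absent */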
3193 	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3194 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3195 			    0x35897377, 0x328b25e1, 0x4fa98922,
3196 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3197 #endif
3198 
3199 #ifdef RSS
3200         /* Fetch the configured RSS key */
3201         rss_getkey((uint8_t *) &rss_seed);
3202 #endif
3203 
3204 	/* Fill out hash function seed */
3205 	for (i = 0; i < IXL_KEYSZ; i++)
3206                 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3207 
3208 	/* Enable PCTYPES for RSS: */
3209 #ifdef RSS
3210 	rss_hash_config = rss_gethashconfig();
3211 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3212                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3213 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3214                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3215 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3216                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3217 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3218                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3219         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3220 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3221 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3222                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3223         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3224                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3225 #else
3226 	set_hena =
3227 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3228 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3229 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3230 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3231 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3232 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3233 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3234 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3235 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3236 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3237 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3238 #endif
3239 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3240 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3241 	hena |= set_hena;
3242 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3243 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3244 
3245 	/* Populate the LUT with max no. of queues in round robin fashion */
3246 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3247 		if (j == vsi->num_queues)
3248 			j = 0;
3249 #ifdef RSS
3250 		/*
3251 		 * Fetch the RSS bucket id for the given indirection entry.
3252 		 * Cap it at the number of configured buckets (which is
3253 		 * num_queues.)
3254 		 */
3255 		que_id = rss_get_indirection_to_bucket(i);
3256 		que_id = que_id % vsi->num_queues;
3257 #else
3258 		que_id = j;
3259 #endif
3260 		/* lut = 4-byte sliding window of 4 lut entries */
3261 		lut = (lut << 8) | (que_id &
3262 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3263 		/* On i = 3, we have 4 entries in lut; write to the register */
3264 		if ((i & 3) == 3)
3265 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3266 	}
3267 	ixl_flush(hw);
3268 }
3269 
3270 
3271 /*
3272 ** This routine is run via a vlan config EVENT; it
3273 ** enables us to use the HW Filter table since we
3274 ** can get the vlan id. This just creates the entry
3275 ** in the soft version of the VFTA; init will
3276 ** repopulate the real table.
3277 */
3278 static void
3279 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3280 {
3281 	struct ixl_vsi	*vsi = ifp->if_softc;
3282 	struct i40e_hw	*hw = vsi->hw;
3283 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3284 
3285 	if (ifp->if_softc !=  arg)   /* Not our event */
3286 		return;
3287 
3288 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3289 		return;
3290 
3291 	IXL_PF_LOCK(pf);
3292 	++vsi->num_vlans;
3293 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3294 	IXL_PF_UNLOCK(pf);
3295 }
3296 
3297 /*
3298 ** This routine is run via a vlan
3299 ** unconfig EVENT; it removes our entry
3300 ** from the soft vfta.
3301 */
3302 static void
3303 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3304 {
3305 	struct ixl_vsi	*vsi = ifp->if_softc;
3306 	struct i40e_hw	*hw = vsi->hw;
3307 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3308 
3309 	if (ifp->if_softc !=  arg)
3310 		return;
3311 
3312 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3313 		return;
3314 
3315 	IXL_PF_LOCK(pf);
3316 	--vsi->num_vlans;
3317 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3318 	IXL_PF_UNLOCK(pf);
3319 }
3320 
3321 /*
3322 ** This routine updates vlan filters; called by init,
3323 ** it scans the filter table and then updates the hw
3324 ** after a soft reset.
3325 */
3326 static void
3327 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3328 {
3329 	struct ixl_mac_filter	*f;
3330 	int			cnt = 0, flags;
3331 
3332 	if (vsi->num_vlans == 0)
3333 		return;
3334 	/*
3335 	** Scan the filter list for vlan entries,
3336 	** mark them for addition and then call
3337 	** for the AQ update.
3338 	*/
3339 	SLIST_FOREACH(f, &vsi->ftl, next) {
3340 		if (f->flags & IXL_FILTER_VLAN) {
3341 			f->flags |=
3342 			    (IXL_FILTER_ADD |
3343 			    IXL_FILTER_USED);
3344 			cnt++;
3345 		}
3346 	}
3347 	if (cnt == 0) {
3348 		printf("setup vlan: no filters found!\n");
3349 		return;
3350 	}
3351 	flags = IXL_FILTER_VLAN;
3352 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3353 	ixl_add_hw_filters(vsi, flags, cnt);
3354 	return;
3355 }
3356 
3357 /*
3358 ** Initialize filter list and add filters that the hardware
3359 ** needs to know about.
3360 */
3361 static void
3362 ixl_init_filters(struct ixl_vsi *vsi)
3363 {
3364 	/* Add broadcast address */
3365 	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3366 	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3367 }
3368 
3369 /*
3370 ** This routine adds multicast filters
3371 */
3372 static void
3373 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3374 {
3375 	struct ixl_mac_filter *f;
3376 
3377 	/* Does one already exist */
3378 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3379 	if (f != NULL)
3380 		return;
3381 
3382 	f = ixl_get_filter(vsi);
3383 	if (f == NULL) {
3384 		printf("WARNING: no filter available!!\n");
3385 		return;
3386 	}
3387 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3388 	f->vlan = IXL_VLAN_ANY;
3389 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3390 	    | IXL_FILTER_MC);
3391 
3392 	return;
3393 }
3394 
3395 /*
3396 ** This routine adds macvlan filters
3397 */
3398 static void
3399 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3400 {
3401 	struct ixl_mac_filter	*f, *tmp;
3402 	device_t		dev = vsi->dev;
3403 
3404 	DEBUGOUT("ixl_add_filter: begin");
3405 
3406 	/* Does one already exist */
3407 	f = ixl_find_filter(vsi, macaddr, vlan);
3408 	if (f != NULL)
3409 		return;
3410 	/*
3411 	** If this is the first vlan being registered, we
3412 	** need to remove the ANY filter that indicates we are
3413 	** not in a vlan, and replace that with a 0 filter.
3414 	*/
3415 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3416 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3417 		if (tmp != NULL) {
3418 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3419 			ixl_add_filter(vsi, macaddr, 0);
3420 		}
3421 	}
3422 
3423 	f = ixl_get_filter(vsi);
3424 	if (f == NULL) {
3425 		device_printf(dev, "WARNING: no filter available!!\n");
3426 		return;
3427 	}
3428 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3429 	f->vlan = vlan;
3430 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3431 	if (f->vlan != IXL_VLAN_ANY)
3432 		f->flags |= IXL_FILTER_VLAN;
3433 
3434 	ixl_add_hw_filters(vsi, f->flags, 1);
3435 	return;
3436 }
3437 
3438 static void
3439 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3440 {
3441 	struct ixl_mac_filter *f;
3442 
3443 	f = ixl_find_filter(vsi, macaddr, vlan);
3444 	if (f == NULL)
3445 		return;
3446 
3447 	f->flags |= IXL_FILTER_DEL;
3448 	ixl_del_hw_filters(vsi, 1);
3449 
3450 	/* Check if this is the last vlan removal */
3451 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3452 		/* Switch back to a non-vlan filter */
3453 		ixl_del_filter(vsi, macaddr, 0);
3454 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3455 	}
3456 	return;
3457 }
3458 
3459 /*
3460 ** Find the filter with both matching mac addr and vlan id
3461 */
3462 static struct ixl_mac_filter *
3463 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3464 {
3465 	struct ixl_mac_filter	*f;
3466 	bool			match = FALSE;
3467 
3468 	SLIST_FOREACH(f, &vsi->ftl, next) {
3469 		if (!cmp_etheraddr(f->macaddr, macaddr))
3470 			continue;
3471 		if (f->vlan == vlan) {
3472 			match = TRUE;
3473 			break;
3474 		}
3475 	}
3476 
3477 	if (!match)
3478 		f = NULL;
3479 	return (f);
3480 }
3481 
3482 /*
3483 ** This routine takes additions to the vsi filter
3484 ** table and issues an Admin Queue command to create
3485 ** the filters in the hardware.
3486 */
3487 static void
3488 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3489 {
3490 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3491 	struct ixl_mac_filter	*f;
3492 	struct i40e_hw	*hw = vsi->hw;
3493 	device_t	dev = vsi->dev;
3494 	int		err, j = 0;
3495 
3496 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3497 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3498 	if (a == NULL) {
3499 		device_printf(dev, "add_hw_filters failed to get memory\n");
3500 		return;
3501 	}
3502 
3503 	/*
3504 	** Scan the filter list; each time we find one
3505 	** we add it to the admin queue array and turn off
3506 	** the add bit.
3507 	*/
3508 	SLIST_FOREACH(f, &vsi->ftl, next) {
3509 		if (f->flags == flags) {
3510 			b = &a[j]; // a pox on fvl long names :)
3511 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3512 			b->vlan_tag =
3513 			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3514 			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3515 			f->flags &= ~IXL_FILTER_ADD;
3516 			j++;
3517 		}
3518 		if (j == cnt)
3519 			break;
3520 	}
3521 	if (j > 0) {
3522 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3523 		if (err)
3524 			device_printf(dev, "aq_add_macvlan err %d, aq_error %d\n",
3525 			    err, hw->aq.asq_last_status);
3526 		else
3527 			vsi->hw_filters_add += j;
3528 	}
3529 	free(a, M_DEVBUF);
3530 	return;
3531 }
3532 
3533 /*
3534 ** This routine takes removals in the vsi filter
3535 ** table and issues an Admin Queue command to delete
3536 ** the filters in the hardware.
3537 */
3538 static void
3539 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3540 {
3541 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3542 	struct i40e_hw		*hw = vsi->hw;
3543 	device_t		dev = vsi->dev;
3544 	struct ixl_mac_filter	*f, *f_temp;
3545 	int			err, j = 0;
3546 
3547 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3548 
3549 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3550 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3551 	if (d == NULL) {
3552 		printf("del hw filter failed to get memory\n");
3553 		return;
3554 	}
3555 
3556 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3557 		if (f->flags & IXL_FILTER_DEL) {
3558 			e = &d[j]; // a pox on fvl long names :)
3559 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3560 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3561 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3562 			/* delete entry from vsi list */
3563 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3564 			free(f, M_DEVBUF);
3565 			j++;
3566 		}
3567 		if (j == cnt)
3568 			break;
3569 	}
3570 	if (j > 0) {
3571 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3572 		/* NOTE: returns ENOENT every time but seems to work fine,
3573 		   so we'll ignore that specific error. */
3574 		// TODO: Does this still occur on current firmwares?
3575 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3576 			int sc = 0;
3577 			for (int i = 0; i < j; i++)
3578 				sc += (!d[i].error_code);
3579 			vsi->hw_filters_del += sc;
3580 			device_printf(dev,
3581 			    "Failed to remove %d/%d filters, aq error %d\n",
3582 			    j - sc, j, hw->aq.asq_last_status);
3583 		} else
3584 			vsi->hw_filters_del += j;
3585 	}
3586 	free(d, M_DEVBUF);
3587 
3588 	DEBUGOUT("ixl_del_hw_filters: end\n");
3589 	return;
3590 }
3591 
3592 
3593 static void
3594 ixl_enable_rings(struct ixl_vsi *vsi)
3595 {
3596 	struct i40e_hw	*hw = vsi->hw;
3597 	u32		reg;
3598 
3599 	for (int i = 0; i < vsi->num_queues; i++) {
3600 		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3601 
3602 		reg = rd32(hw, I40E_QTX_ENA(i));
3603 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3604 		    I40E_QTX_ENA_QENA_STAT_MASK;
3605 		wr32(hw, I40E_QTX_ENA(i), reg);
3606 		/* Verify the enable took */
3607 		for (int j = 0; j < 10; j++) {
3608 			reg = rd32(hw, I40E_QTX_ENA(i));
3609 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3610 				break;
3611 			i40e_msec_delay(10);
3612 		}
3613 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3614 			printf("TX queue %d disabled!\n", i);
3615 
3616 		reg = rd32(hw, I40E_QRX_ENA(i));
3617 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3618 		    I40E_QRX_ENA_QENA_STAT_MASK;
3619 		wr32(hw, I40E_QRX_ENA(i), reg);
3620 		/* Verify the enable took */
3621 		for (int j = 0; j < 10; j++) {
3622 			reg = rd32(hw, I40E_QRX_ENA(i));
3623 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3624 				break;
3625 			i40e_msec_delay(10);
3626 		}
3627 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3628 			printf("RX queue %d disabled!\n", i);
3629 	}
3630 }
3631 
3632 static void
3633 ixl_disable_rings(struct ixl_vsi *vsi)
3634 {
3635 	struct i40e_hw	*hw = vsi->hw;
3636 	u32		reg;
3637 
3638 	for (int i = 0; i < vsi->num_queues; i++) {
3639 		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3640 		i40e_usec_delay(500);
3641 
3642 		reg = rd32(hw, I40E_QTX_ENA(i));
3643 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3644 		wr32(hw, I40E_QTX_ENA(i), reg);
3645 		/* Verify the disable took */
3646 		for (int j = 0; j < 10; j++) {
3647 			reg = rd32(hw, I40E_QTX_ENA(i));
3648 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3649 				break;
3650 			i40e_msec_delay(10);
3651 		}
3652 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3653 			printf("TX queue %d still enabled!\n", i);
3654 
3655 		reg = rd32(hw, I40E_QRX_ENA(i));
3656 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3657 		wr32(hw, I40E_QRX_ENA(i), reg);
3658 		/* Verify the disable took */
3659 		for (int j = 0; j < 10; j++) {
3660 			reg = rd32(hw, I40E_QRX_ENA(i));
3661 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3662 				break;
3663 			i40e_msec_delay(10);
3664 		}
3665 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3666 			printf("RX queue %d still enabled!\n", i);
3667 	}
3668 }
3669 
3670 /**
3671  * ixl_handle_mdd_event
3672  *
3673  * Called from the interrupt handler to identify possibly malicious VFs
3674  * (it also detects events from the PF).
3675  **/
3676 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3677 {
3678 	struct i40e_hw *hw = &pf->hw;
3679 	device_t dev = pf->dev;
3680 	bool mdd_detected = false;
3681 	bool pf_mdd_detected = false;
3682 	u32 reg;
3683 
3684 	/* find what triggered the MDD event */
3685 	reg = rd32(hw, I40E_GL_MDET_TX);
3686 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3687 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3688 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3689 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3690 				I40E_GL_MDET_TX_EVENT_SHIFT;
3691 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3692 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3693 		device_printf(dev,
3694 			 "Malicious Driver Detection event 0x%02x"
3695 			 " on TX queue %d pf number 0x%02x\n",
3696 			 event, queue, pf_num);
3697 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3698 		mdd_detected = true;
3699 	}
3700 	reg = rd32(hw, I40E_GL_MDET_RX);
3701 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3702 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3703 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3704 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3705 				I40E_GL_MDET_RX_EVENT_SHIFT;
3706 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3707 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3708 		device_printf(dev,
3709 			 "Malicious Driver Detection event 0x%02x"
3710 			 " on RX queue %d of function 0x%02x\n",
3711 			 event, queue, func);
3712 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3713 		mdd_detected = true;
3714 	}
3715 
3716 	if (mdd_detected) {
3717 		reg = rd32(hw, I40E_PF_MDET_TX);
3718 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3719 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3720 			device_printf(dev,
3721 				 "MDD TX event is for this function 0x%08x",
3722 				 reg);
3723 			pf_mdd_detected = true;
3724 		}
3725 		reg = rd32(hw, I40E_PF_MDET_RX);
3726 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3727 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3728 			device_printf(dev,
3729 				 "MDD RX event is for this function 0x%08x",
3730 				 reg);
3731 			pf_mdd_detected = true;
3732 		}
3733 	}
3734 
3735 	/* re-enable mdd interrupt cause */
3736 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3737 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3738 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3739 	ixl_flush(hw);
3740 }
3741 
3742 static void
3743 ixl_enable_intr(struct ixl_vsi *vsi)
3744 {
3745 	struct i40e_hw		*hw = vsi->hw;
3746 	struct ixl_queue	*que = vsi->queues;
3747 
3748 	if (ixl_enable_msix) {
3749 		ixl_enable_adminq(hw);
3750 		for (int i = 0; i < vsi->num_queues; i++, que++)
3751 			ixl_enable_queue(hw, que->me);
3752 	} else
3753 		ixl_enable_legacy(hw);
3754 }
3755 
3756 static void
3757 ixl_disable_intr(struct ixl_vsi *vsi)
3758 {
3759 	struct i40e_hw		*hw = vsi->hw;
3760 	struct ixl_queue	*que = vsi->queues;
3761 
3762 	if (ixl_enable_msix) {
3763 		ixl_disable_adminq(hw);
3764 		for (int i = 0; i < vsi->num_queues; i++, que++)
3765 			ixl_disable_queue(hw, que->me);
3766 	} else
3767 		ixl_disable_legacy(hw);
3768 }
3769 
3770 static void
3771 ixl_enable_adminq(struct i40e_hw *hw)
3772 {
3773 	u32		reg;
3774 
3775 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3776 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3777 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3778 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3779 	ixl_flush(hw);
3780 	return;
3781 }
3782 
3783 static void
3784 ixl_disable_adminq(struct i40e_hw *hw)
3785 {
3786 	u32		reg;
3787 
3788 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3789 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3790 
3791 	return;
3792 }
3793 
3794 static void
3795 ixl_enable_queue(struct i40e_hw *hw, int id)
3796 {
3797 	u32		reg;
3798 
3799 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3800 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3801 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3802 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3803 }
3804 
3805 static void
3806 ixl_disable_queue(struct i40e_hw *hw, int id)
3807 {
3808 	u32		reg;
3809 
3810 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3811 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3812 
3813 	return;
3814 }
3815 
3816 static void
3817 ixl_enable_legacy(struct i40e_hw *hw)
3818 {
3819 	u32		reg;
3820 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3821 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3822 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3823 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3824 }
3825 
3826 static void
3827 ixl_disable_legacy(struct i40e_hw *hw)
3828 {
3829 	u32		reg;
3830 
3831 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3832 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3833 
3834 	return;
3835 }
3836 
3837 static void
3838 ixl_update_stats_counters(struct ixl_pf *pf)
3839 {
3840 	struct i40e_hw	*hw = &pf->hw;
3841 	struct ixl_vsi *vsi = &pf->vsi;
3842 
3843 	struct i40e_hw_port_stats *nsd = &pf->stats;
3844 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3845 
3846 	/* Update hw stats */
3847 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3848 			   pf->stat_offsets_loaded,
3849 			   &osd->crc_errors, &nsd->crc_errors);
3850 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3851 			   pf->stat_offsets_loaded,
3852 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3853 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3854 			   I40E_GLPRT_GORCL(hw->port),
3855 			   pf->stat_offsets_loaded,
3856 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3857 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3858 			   I40E_GLPRT_GOTCL(hw->port),
3859 			   pf->stat_offsets_loaded,
3860 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3861 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3862 			   pf->stat_offsets_loaded,
3863 			   &osd->eth.rx_discards,
3864 			   &nsd->eth.rx_discards);
3865 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3866 			   I40E_GLPRT_UPRCL(hw->port),
3867 			   pf->stat_offsets_loaded,
3868 			   &osd->eth.rx_unicast,
3869 			   &nsd->eth.rx_unicast);
3870 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3871 			   I40E_GLPRT_UPTCL(hw->port),
3872 			   pf->stat_offsets_loaded,
3873 			   &osd->eth.tx_unicast,
3874 			   &nsd->eth.tx_unicast);
3875 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3876 			   I40E_GLPRT_MPRCL(hw->port),
3877 			   pf->stat_offsets_loaded,
3878 			   &osd->eth.rx_multicast,
3879 			   &nsd->eth.rx_multicast);
3880 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3881 			   I40E_GLPRT_MPTCL(hw->port),
3882 			   pf->stat_offsets_loaded,
3883 			   &osd->eth.tx_multicast,
3884 			   &nsd->eth.tx_multicast);
3885 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3886 			   I40E_GLPRT_BPRCL(hw->port),
3887 			   pf->stat_offsets_loaded,
3888 			   &osd->eth.rx_broadcast,
3889 			   &nsd->eth.rx_broadcast);
3890 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3891 			   I40E_GLPRT_BPTCL(hw->port),
3892 			   pf->stat_offsets_loaded,
3893 			   &osd->eth.tx_broadcast,
3894 			   &nsd->eth.tx_broadcast);
3895 
3896 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3897 			   pf->stat_offsets_loaded,
3898 			   &osd->tx_dropped_link_down,
3899 			   &nsd->tx_dropped_link_down);
3900 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3901 			   pf->stat_offsets_loaded,
3902 			   &osd->mac_local_faults,
3903 			   &nsd->mac_local_faults);
3904 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3905 			   pf->stat_offsets_loaded,
3906 			   &osd->mac_remote_faults,
3907 			   &nsd->mac_remote_faults);
3908 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3909 			   pf->stat_offsets_loaded,
3910 			   &osd->rx_length_errors,
3911 			   &nsd->rx_length_errors);
3912 
3913 	/* Flow control (LFC) stats */
3914 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3915 			   pf->stat_offsets_loaded,
3916 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3917 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3918 			   pf->stat_offsets_loaded,
3919 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3920 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3921 			   pf->stat_offsets_loaded,
3922 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3923 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3924 			   pf->stat_offsets_loaded,
3925 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3926 
3927 	/* Packet size stats rx */
3928 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3929 			   I40E_GLPRT_PRC64L(hw->port),
3930 			   pf->stat_offsets_loaded,
3931 			   &osd->rx_size_64, &nsd->rx_size_64);
3932 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3933 			   I40E_GLPRT_PRC127L(hw->port),
3934 			   pf->stat_offsets_loaded,
3935 			   &osd->rx_size_127, &nsd->rx_size_127);
3936 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3937 			   I40E_GLPRT_PRC255L(hw->port),
3938 			   pf->stat_offsets_loaded,
3939 			   &osd->rx_size_255, &nsd->rx_size_255);
3940 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3941 			   I40E_GLPRT_PRC511L(hw->port),
3942 			   pf->stat_offsets_loaded,
3943 			   &osd->rx_size_511, &nsd->rx_size_511);
3944 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3945 			   I40E_GLPRT_PRC1023L(hw->port),
3946 			   pf->stat_offsets_loaded,
3947 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3948 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3949 			   I40E_GLPRT_PRC1522L(hw->port),
3950 			   pf->stat_offsets_loaded,
3951 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3952 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3953 			   I40E_GLPRT_PRC9522L(hw->port),
3954 			   pf->stat_offsets_loaded,
3955 			   &osd->rx_size_big, &nsd->rx_size_big);
3956 
3957 	/* Packet size stats tx */
3958 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3959 			   I40E_GLPRT_PTC64L(hw->port),
3960 			   pf->stat_offsets_loaded,
3961 			   &osd->tx_size_64, &nsd->tx_size_64);
3962 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3963 			   I40E_GLPRT_PTC127L(hw->port),
3964 			   pf->stat_offsets_loaded,
3965 			   &osd->tx_size_127, &nsd->tx_size_127);
3966 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3967 			   I40E_GLPRT_PTC255L(hw->port),
3968 			   pf->stat_offsets_loaded,
3969 			   &osd->tx_size_255, &nsd->tx_size_255);
3970 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3971 			   I40E_GLPRT_PTC511L(hw->port),
3972 			   pf->stat_offsets_loaded,
3973 			   &osd->tx_size_511, &nsd->tx_size_511);
3974 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3975 			   I40E_GLPRT_PTC1023L(hw->port),
3976 			   pf->stat_offsets_loaded,
3977 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3978 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3979 			   I40E_GLPRT_PTC1522L(hw->port),
3980 			   pf->stat_offsets_loaded,
3981 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3982 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3983 			   I40E_GLPRT_PTC9522L(hw->port),
3984 			   pf->stat_offsets_loaded,
3985 			   &osd->tx_size_big, &nsd->tx_size_big);
3986 
3987 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3988 			   pf->stat_offsets_loaded,
3989 			   &osd->rx_undersize, &nsd->rx_undersize);
3990 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3991 			   pf->stat_offsets_loaded,
3992 			   &osd->rx_fragments, &nsd->rx_fragments);
3993 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3994 			   pf->stat_offsets_loaded,
3995 			   &osd->rx_oversize, &nsd->rx_oversize);
3996 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3997 			   pf->stat_offsets_loaded,
3998 			   &osd->rx_jabber, &nsd->rx_jabber);
3999 	pf->stat_offsets_loaded = true;
4000 	/* End hw stats */
4001 
4002 	/* Update vsi stats */
4003 	ixl_update_eth_stats(vsi);
4004 
4005 	/* OS statistics */
4006 	// ERJ - these are per-port, update all vsis?
4007 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4008 }
4009 
4010 /*
4011 ** Tasklet handler for MSIX Adminq interrupts
4012 **  - done outside the interrupt context since it might sleep
4013 */
4014 static void
4015 ixl_do_adminq(void *context, int pending)
4016 {
4017 	struct ixl_pf			*pf = context;
4018 	struct i40e_hw			*hw = &pf->hw;
4019 	struct ixl_vsi			*vsi = &pf->vsi;
4020 	struct i40e_arq_event_info	event;
4021 	i40e_status			ret;
4022 	u32				reg, loop = 0;
4023 	u16				opcode, result;
4024 
4025 	event.buf_len = IXL_AQ_BUF_SZ;
4026 	event.msg_buf = malloc(event.buf_len,
4027 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4028 	if (!event.msg_buf) {
4029 		printf("Unable to allocate adminq memory\n");
4030 		return;
4031 	}
4032 
4033 	/* clean and process any events */
4034 	do {
4035 		ret = i40e_clean_arq_element(hw, &event, &result);
4036 		if (ret)
4037 			break;
4038 		opcode = LE16_TO_CPU(event.desc.opcode);
4039 		switch (opcode) {
4040 		case i40e_aqc_opc_get_link_status:
4041 			vsi->link_up = ixl_config_link(hw);
4042 			ixl_update_link_status(pf);
4043 			break;
4044 		case i40e_aqc_opc_send_msg_to_pf:
4045 			/* process pf/vf communication here */
4046 			break;
4047 		case i40e_aqc_opc_event_lan_overflow:
4048 			break;
4049 		default:
4050 #ifdef IXL_DEBUG
4051 			printf("AdminQ unknown event %x\n", opcode);
4052 #endif
4053 			break;
4054 		}
4055 
4056 	} while (result && (loop++ < IXL_ADM_LIMIT));
4057 
4058 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4059 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4060 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4061 	free(event.msg_buf, M_DEVBUF);
4062 
4063 	if (pf->msix > 1)
4064 		ixl_enable_adminq(&pf->hw);
4065 	else
4066 		ixl_enable_intr(vsi);
4067 }
4068 
4069 static int
4070 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4071 {
4072 	struct ixl_pf	*pf;
4073 	int		error, input = 0;
4074 
4075 	error = sysctl_handle_int(oidp, &input, 0, req);
4076 
4077 	if (error || !req->newptr)
4078 		return (error);
4079 
4080 	if (input == 1) {
4081 		pf = (struct ixl_pf *)arg1;
4082 		ixl_print_debug_info(pf);
4083 	}
4084 
4085 	return (error);
4086 }
4087 
4088 static void
4089 ixl_print_debug_info(struct ixl_pf *pf)
4090 {
4091 	struct i40e_hw		*hw = &pf->hw;
4092 	struct ixl_vsi		*vsi = &pf->vsi;
4093 	struct ixl_queue	*que = vsi->queues;
4094 	struct rx_ring		*rxr = &que->rxr;
4095 	struct tx_ring		*txr = &que->txr;
4096 	u32			reg;
4097 
4098 
4099 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4100 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4101 	printf("RX next check = %x\n", rxr->next_check);
4102 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4103 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4104 	printf("TX desc avail = %x\n", txr->avail);
4105 
4106 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4107 	printf("RX Bytes = %x\n", reg);
4108 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4109 	printf("Port RX Bytes = %x\n", reg);
4110 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4111 	printf("RX discard = %x\n", reg);
4112 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4113 	printf("Port RX discard = %x\n", reg);
4114 
4115 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4116 	printf("TX errors = %x\n", reg);
4117 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4118 	printf("TX Bytes = %x\n", reg);
4119 
4120 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4121 	printf("RX undersize = %x\n", reg);
4122 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4123 	printf("RX fragments = %x\n", reg);
4124 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4125 	printf("RX oversize = %x\n", reg);
4126 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4127 	printf("RX length error = %x\n", reg);
4128 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4129 	printf("mac remote fault = %x\n", reg);
4130 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4131 	printf("mac local fault = %x\n", reg);
4132 }
4133 
4134 /**
4135  * Update VSI-specific ethernet statistics counters.
4136  **/
4137 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4138 {
4139 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4140 	struct i40e_hw *hw = &pf->hw;
4141 	struct i40e_eth_stats *es;
4142 	struct i40e_eth_stats *oes;
4143 	int i;
4144 	uint64_t tx_discards;
4145 	struct i40e_hw_port_stats *nsd;
4146 	u16 stat_idx = vsi->info.stat_counter_idx;
4147 
4148 	es = &vsi->eth_stats;
4149 	oes = &vsi->eth_stats_offsets;
4150 	nsd = &pf->stats;
4151 
4152 	/* Gather up the stats that the hw collects */
4153 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4154 			   vsi->stat_offsets_loaded,
4155 			   &oes->tx_errors, &es->tx_errors);
4156 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4157 			   vsi->stat_offsets_loaded,
4158 			   &oes->rx_discards, &es->rx_discards);
4159 
4160 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4161 			   I40E_GLV_GORCL(stat_idx),
4162 			   vsi->stat_offsets_loaded,
4163 			   &oes->rx_bytes, &es->rx_bytes);
4164 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4165 			   I40E_GLV_UPRCL(stat_idx),
4166 			   vsi->stat_offsets_loaded,
4167 			   &oes->rx_unicast, &es->rx_unicast);
4168 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4169 			   I40E_GLV_MPRCL(stat_idx),
4170 			   vsi->stat_offsets_loaded,
4171 			   &oes->rx_multicast, &es->rx_multicast);
4172 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4173 			   I40E_GLV_BPRCL(stat_idx),
4174 			   vsi->stat_offsets_loaded,
4175 			   &oes->rx_broadcast, &es->rx_broadcast);
4176 
4177 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4178 			   I40E_GLV_GOTCL(stat_idx),
4179 			   vsi->stat_offsets_loaded,
4180 			   &oes->tx_bytes, &es->tx_bytes);
4181 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4182 			   I40E_GLV_UPTCL(stat_idx),
4183 			   vsi->stat_offsets_loaded,
4184 			   &oes->tx_unicast, &es->tx_unicast);
4185 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4186 			   I40E_GLV_MPTCL(stat_idx),
4187 			   vsi->stat_offsets_loaded,
4188 			   &oes->tx_multicast, &es->tx_multicast);
4189 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4190 			   I40E_GLV_BPTCL(stat_idx),
4191 			   vsi->stat_offsets_loaded,
4192 			   &oes->tx_broadcast, &es->tx_broadcast);
4193 	vsi->stat_offsets_loaded = true;
4194 
4195 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4196 	for (i = 0; i < vsi->num_queues; i++)
4197 		tx_discards += vsi->queues[i].txr.br->br_drops;
4198 
4199 	/* Update ifnet stats */
4200 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4201 	                   es->rx_multicast +
4202 			   es->rx_broadcast);
4203 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4204 	                   es->tx_multicast +
4205 			   es->tx_broadcast);
4206 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4207 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4208 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4209 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4210 
4211 	IXL_SET_OERRORS(vsi, es->tx_errors);
4212 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4213 	IXL_SET_OQDROPS(vsi, tx_discards);
4214 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4215 	IXL_SET_COLLISIONS(vsi, 0);
4216 }
4217 
4218 /**
4219  * Reset all of the stats for the given pf
4220  **/
4221 void ixl_pf_reset_stats(struct ixl_pf *pf)
4222 {
4223 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4224 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4225 	pf->stat_offsets_loaded = false;
4226 }
4227 
4228 /**
4229  * Resets all stats of the given vsi
4230  **/
4231 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4232 {
4233 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4234 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4235 	vsi->stat_offsets_loaded = false;
4236 }
4237 
4238 /**
4239  * Read and update a 48 bit stat from the hw
4240  *
4241  * Since the device stats are not reset at PFReset, they likely will not
4242  * be zeroed when the driver starts.  We'll save the first values read
4243  * and use them as offsets to be subtracted from the raw values in order
4244  * to report stats that count from zero.
4245  **/
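     /*
     ** Worked example with assumed raw values: the first read latches
     ** *offset = 0xFFFF00000000 and the stat reports 0.  After the
     ** counter wraps at 2^48, a raw read of 0x100 reports
     ** (0x100 + (1ULL << 48)) - 0xFFFF00000000 = 0x100000100.
     */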
4246 static void
4247 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4248 	bool offset_loaded, u64 *offset, u64 *stat)
4249 {
4250 	u64 new_data;
4251 
4252 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4253 	new_data = rd64(hw, loreg);
4254 #else
4255 	/*
4256 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4257 	 * 10 don't support 8 byte bus reads/writes.
4258 	 */
4259 	new_data = rd32(hw, loreg);
4260 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4261 #endif
4262 
4263 	if (!offset_loaded)
4264 		*offset = new_data;
4265 	if (new_data >= *offset)
4266 		*stat = new_data - *offset;
4267 	else
4268 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4269 	*stat &= 0xFFFFFFFFFFFFULL;
4270 }
4271 
4272 /**
4273  * Read and update a 32 bit stat from the hw
4274  **/
4275 static void
4276 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4277 	bool offset_loaded, u64 *offset, u64 *stat)
4278 {
4279 	u32 new_data;
4280 
4281 	new_data = rd32(hw, reg);
4282 	if (!offset_loaded)
4283 		*offset = new_data;
4284 	if (new_data >= *offset)
4285 		*stat = (u32)(new_data - *offset);
4286 	else
4287 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4288 }
4289 
4290 /*
4291 ** Set flow control using sysctl:
4292 ** 	0 - off
4293 **	1 - rx pause
4294 **	2 - tx pause
4295 **	3 - full
4296 */
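     /*
     ** Usage sketch (the sysctl OID path here is an assumption; the
     ** actual node depends on how and where the handler was attached):
     **	# sysctl dev.ixl.0.fc=3		request full rx/tx pause
     */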
4297 static int
4298 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4299 {
4300 	/*
4301 	 * TODO: ensure flow control is disabled if
4302 	 * priority flow control is enabled
4303 	 *
4304 	 * TODO: ensure tx CRC by hardware should be enabled
4305 	 * if tx flow control is enabled.
4306 	 */
4307 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4308 	struct i40e_hw *hw = &pf->hw;
4309 	device_t dev = pf->dev;
4310 	int requested_fc = 0, error = 0;
4311 	enum i40e_status_code aq_error = 0;
4312 	u8 fc_aq_err = 0;
4313 
4314 	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4315 	if (aq_error) {
4316 		device_printf(dev,
4317 		    "%s: Error retrieving link info from aq, %d\n",
4318 		    __func__, aq_error);
4319 		return (EAGAIN);
4320 	}
4321 
4322 	/* Read in new mode */
4323 	requested_fc = hw->fc.current_mode;
4324 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4325 	if ((error) || (req->newptr == NULL))
4326 		return (error);
4327 	if (requested_fc < 0 || requested_fc > 3) {
4328 		device_printf(dev,
4329 		    "Invalid fc mode; valid modes are 0 through 3\n");
4330 		return (EINVAL);
4331 	}
4332 
4333 	/*
4334 	** Changing flow control mode currently does not work on
4335 	** 40GBASE-CR4 PHYs
4336 	*/
4337 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4338 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4339 		device_printf(dev, "Changing flow control mode unsupported"
4340 		    " on 40GBase-CR4 media.\n");
4341 		return (ENODEV);
4342 	}
4343 
4344 	/* Set fc ability for port */
4345 	hw->fc.requested_mode = requested_fc;
4346 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4347 	if (aq_error) {
4348 		device_printf(dev,
4349 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4350 		    __func__, aq_error, fc_aq_err);
4351 		return (EAGAIN);
4352 	}
4353 
4354 	if (hw->fc.current_mode != hw->fc.requested_mode) {
4355 		device_printf(dev, "%s: FC set failure:\n", __func__);
4356 		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4357 		    __func__,
4358 		    ixl_fc_string[hw->fc.current_mode],
4359 		    ixl_fc_string[hw->fc.requested_mode]);
4360 	}
4361 
4362 	return (0);
4363 }
4364 
4365 static int
4366 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4367 {
4368 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4369 	struct i40e_hw *hw = &pf->hw;
4370 	int error = 0, index = 0;
4371 
4372 	char *speeds[] = {
4373 		"Unknown",
4374 		"100M",
4375 		"1G",
4376 		"10G",
4377 		"40G",
4378 		"20G"
4379 	};
4380 
4381 	ixl_update_link_status(pf);
4382 
4383 	switch (hw->phy.link_info.link_speed) {
4384 	case I40E_LINK_SPEED_100MB:
4385 		index = 1;
4386 		break;
4387 	case I40E_LINK_SPEED_1GB:
4388 		index = 2;
4389 		break;
4390 	case I40E_LINK_SPEED_10GB:
4391 		index = 3;
4392 		break;
4393 	case I40E_LINK_SPEED_40GB:
4394 		index = 4;
4395 		break;
4396 	case I40E_LINK_SPEED_20GB:
4397 		index = 5;
4398 		break;
4399 	case I40E_LINK_SPEED_UNKNOWN:
4400 	default:
4401 		index = 0;
4402 		break;
4403 	}
4404 
4405 	error = sysctl_handle_string(oidp, speeds[index],
4406 	    strlen(speeds[index]), req);
4407 	return (error);
4408 }
4409 
4410 static int
4411 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4412 {
4413 	struct i40e_hw *hw = &pf->hw;
4414 	device_t dev = pf->dev;
4415 	struct i40e_aq_get_phy_abilities_resp abilities;
4416 	struct i40e_aq_set_phy_config config;
4417 	enum i40e_status_code aq_error = 0;
4418 
4419 	/* Get current capability information */
4420 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4421 	if (aq_error) {
4422 		device_printf(dev, "%s: Error getting phy capabilities %d,"
4423 		    " aq error: %d\n", __func__, aq_error,
4424 		    hw->aq.asq_last_status);
4425 		return (EAGAIN);
4426 	}
4427 
4428 	/* Prepare new config */
4429 	bzero(&config, sizeof(config));
4430 	config.phy_type = abilities.phy_type;
4431 	config.abilities = abilities.abilities
4432 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4433 	config.eee_capability = abilities.eee_capability;
4434 	config.eeer = abilities.eeer_val;
4435 	config.low_power_ctrl = abilities.d3_lpan;
4436 	/* Translate into aq cmd link_speed */
4437 	if (speeds & 0x4)
4438 		config.link_speed |= I40E_LINK_SPEED_10GB;
4439 	if (speeds & 0x2)
4440 		config.link_speed |= I40E_LINK_SPEED_1GB;
4441 	if (speeds & 0x1)
4442 		config.link_speed |= I40E_LINK_SPEED_100MB;
4443 
4444 	/* Do aq command & restart link */
4445 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4446 	if (aq_error) {
4447 		device_printf(dev, "%s: Error setting new phy config %d,"
4448 		    " aq error: %d\n", __func__, aq_error,
4449 		    hw->aq.asq_last_status);
4450 		return (EAGAIN);
4451 	}
4452 
4453 	/*
4454 	** This seems a bit heavy handed, but we
4455 	** need to get a reinit on some devices
4456 	*/
4457 	IXL_PF_LOCK(pf);
4458 	ixl_stop(pf);
4459 	ixl_init_locked(pf);
4460 	IXL_PF_UNLOCK(pf);
4461 
4462 	return (0);
4463 }
4464 
4465 /*
4466 ** Control link advertise speed:
4467 **	Flags:
4468 **	0x1 - advertise 100 Mb
4469 **	0x2 - advertise 1G
4470 **	0x4 - advertise 10G
4471 **
4472 ** Does not work on 40G devices.
4473 */
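     /*
     ** Usage sketch (hypothetical OID path and name for unit 0):
     **	# sysctl dev.ixl.0.advertise_speed=0x6	advertise 1G and 10G
     */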
4474 static int
4475 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4476 {
4477 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4478 	struct i40e_hw *hw = &pf->hw;
4479 	device_t dev = pf->dev;
4480 	int requested_ls = 0;
4481 	int error = 0;
4482 
4483 	/*
4484 	** FW doesn't support changing advertised speed
4485 	** for 40G devices; speed is always 40G.
4486 	*/
4487 	if (i40e_is_40G_device(hw->device_id))
4488 		return (ENODEV);
4489 
4490 	/* Read in new mode */
4491 	requested_ls = pf->advertised_speed;
4492 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4493 	if ((error) || (req->newptr == NULL))
4494 		return (error);
4495 	if (requested_ls < 1 || requested_ls > 7) {
4496 		device_printf(dev,
4497 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4498 		return (EINVAL);
4499 	}
4500 
4501 	/* Exit if no change */
4502 	if (pf->advertised_speed == requested_ls)
4503 		return (0);
4504 
4505 	error = ixl_set_advertised_speeds(pf, requested_ls);
4506 	if (error)
4507 		return (error);
4508 
4509 	pf->advertised_speed = requested_ls;
4510 	ixl_update_link_status(pf);
4511 	return (0);
4512 }
4513 
4514 /*
4515 ** Get the width and transaction speed of
4516 ** the bus this adapter is plugged into.
4517 */
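     /*
     ** Example of the resulting console line for a x8 Gen3 slot
     ** (device_printf prefixes the device name), e.g.:
     **	ixl0: PCI Express Bus: Speed 8.0GT/s Width x8
     */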
4518 static u16
4519 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4520 {
4521 	u16		link;
4522 	u32		offset;
4523 
4525 	/* Get the PCI Express Capabilities offset */
4526 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
4527 
4528 	/* ...and read the Link Status Register */
4529 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4530 
4531 	switch (link & I40E_PCI_LINK_WIDTH) {
4532 	case I40E_PCI_LINK_WIDTH_1:
4533 		hw->bus.width = i40e_bus_width_pcie_x1;
4534 		break;
4535 	case I40E_PCI_LINK_WIDTH_2:
4536 		hw->bus.width = i40e_bus_width_pcie_x2;
4537 		break;
4538 	case I40E_PCI_LINK_WIDTH_4:
4539 		hw->bus.width = i40e_bus_width_pcie_x4;
4540 		break;
4541 	case I40E_PCI_LINK_WIDTH_8:
4542 		hw->bus.width = i40e_bus_width_pcie_x8;
4543 		break;
4544 	default:
4545 		hw->bus.width = i40e_bus_width_unknown;
4546 		break;
4547 	}
4548 
4549 	switch (link & I40E_PCI_LINK_SPEED) {
4550 	case I40E_PCI_LINK_SPEED_2500:
4551 		hw->bus.speed = i40e_bus_speed_2500;
4552 		break;
4553 	case I40E_PCI_LINK_SPEED_5000:
4554 		hw->bus.speed = i40e_bus_speed_5000;
4555 		break;
4556 	case I40E_PCI_LINK_SPEED_8000:
4557 		hw->bus.speed = i40e_bus_speed_8000;
4558 		break;
4559 	default:
4560 		hw->bus.speed = i40e_bus_speed_unknown;
4561 		break;
4562 	}
4563 
4565 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4566 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4567 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4568 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4569 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4570 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4571 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4572 	    ("Unknown"));
4573 
4574 	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4575 	    (hw->bus.speed < i40e_bus_speed_8000)) {
4576 		device_printf(dev, "PCI-Express bandwidth available"
4577 		    " for this device is not sufficient for"
4578 		    " normal operation.\n");
4579 		device_printf(dev, "For expected performance a x8 "
4580 		    "PCIE Gen3 slot is required.\n");
4581 	}
4582 
4584 	return (link);
4585 
4586 static int
4587 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4588 {
4589 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4590 	struct i40e_hw	*hw = &pf->hw;
4591 	char		buf[32];
4592 
4593 	snprintf(buf, sizeof(buf),
4594 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4595 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4596 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4597 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4598 	    IXL_NVM_VERSION_HI_SHIFT,
4599 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4600 	    IXL_NVM_VERSION_LO_SHIFT,
4601 	    hw->nvm.eetrack);
4602 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4603 }
4604 
4605 
4606 #ifdef IXL_DEBUG_SYSCTL
4607 static int
4608 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4609 {
4610 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4611 	struct i40e_hw *hw = &pf->hw;
4612 	struct i40e_link_status link_status;
4613 	char buf[512];
4614 
4615 	enum i40e_status_code aq_error = 0;
4616 
4617 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4618 	if (aq_error) {
4619 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4620 		return (EPERM);
4621 	}
4622 
4623 	sprintf(buf, "\n"
4624 	    "PHY Type : %#04x\n"
4625 	    "Speed    : %#04x\n"
4626 	    "Link info: %#04x\n"
4627 	    "AN info  : %#04x\n"
4628 	    "Ext info : %#04x",
4629 	    link_status.phy_type, link_status.link_speed,
4630 	    link_status.link_info, link_status.an_info,
4631 	    link_status.ext_info);
4632 
4633 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4634 }
4635 
4636 static int
4637 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4638 {
4639 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4640 	struct i40e_hw *hw = &pf->hw;
4641 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4642 	char buf[512];
4643 
4644 	enum i40e_status_code aq_error = 0;
4645 
4646 	// TODO: Print out list of qualified modules as well?
4647 	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4648 	if (aq_error) {
4649 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4650 		return (EPERM);
4651 	}
4652 
4653 	sprintf(buf, "\n"
4654 	    "PHY Type : %#010x\n"
4655 	    "Speed    : %#04x\n"
4656 	    "Abilities: %#04x\n"
4657 	    "EEE cap  : %#06x\n"
4658 	    "EEER reg : %#010x\n"
4659 	    "D3 Lpan  : %#04x",
4660 	    abilities_resp.phy_type, abilities_resp.link_speed,
4661 	    abilities_resp.abilities, abilities_resp.eee_capability,
4662 	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4663 
4664 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4665 }
4666 
4667 static int
4668 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4669 {
4670 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4671 	struct ixl_vsi *vsi = &pf->vsi;
4672 	struct ixl_mac_filter *f;
4673 	char *buf, *buf_i;
4674 
4675 	int error = 0;
4676 	int ftl_len = 0;
4677 	int ftl_counter = 0;
4678 	int buf_len = 0;
4679 	int entry_len = 42;
4680 
4681 	SLIST_FOREACH(f, &vsi->ftl, next) {
4682 		ftl_len++;
4683 	}
4684 
4685 	if (ftl_len < 1) {
4686 		sysctl_handle_string(oidp, "(none)", 6, req);
4687 		return (0);
4688 	}
4689 
4690 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4691 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
     	if (buf == NULL) {
     		device_printf(pf->dev, "%s: failed to allocate %d bytes\n",
     		    __func__, buf_len);
     		return (ENOMEM);
     	}
4692 
4693 	sprintf(buf_i++, "\n");
4694 	SLIST_FOREACH(f, &vsi->ftl, next) {
4695 		sprintf(buf_i,
4696 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4697 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4698 		buf_i += entry_len;
4699 		/* don't print '\n' for last entry */
4700 		if (++ftl_counter != ftl_len) {
4701 			sprintf(buf_i, "\n");
4702 			buf_i++;
4703 		}
4704 	}
4705 
4706 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4707 	if (error)
4708 		printf("sysctl error: %d\n", error);
4709 	free(buf, M_DEVBUF);
4710 	return error;
4711 }
4712 
4713 #define IXL_SW_RES_SIZE 0x14
4714 static int
4715 ixl_res_alloc_cmp(const void *a, const void *b)
4716 {
4717 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4718 	one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4719 	two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4720 
4721 	return ((int)one->resource_type - (int)two->resource_type);
4722 }
4723 
4724 static int
4725 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4726 {
4727 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4728 	struct i40e_hw *hw = &pf->hw;
4729 	device_t dev = pf->dev;
4730 	struct sbuf *buf;
4731 	int error = 0;
4732 
4733 	u8 num_entries;
4734 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4735 
4736 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4737 	if (!buf) {
4738 		device_printf(dev, "Could not allocate sbuf for output.\n");
4739 		return (ENOMEM);
4740 	}
4741 
4742 	bzero(resp, sizeof(resp));
4743 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4744 				resp,
4745 				IXL_SW_RES_SIZE,
4746 				NULL);
4747 	if (error) {
4748 		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4749 		    __func__, error, hw->aq.asq_last_status);
4750 		sbuf_delete(buf);
4751 		return error;
4752 	}
4753 
4754 	/* Sort entries by type for display */
4755 	qsort(resp, num_entries,
4756 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4757 	    &ixl_res_alloc_cmp);
4758 
4759 	sbuf_cat(buf, "\n");
4760 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4761 	sbuf_printf(buf,
4762 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4763 	    "     | (this)     | (all) | (this) | (all)       \n");
4764 	for (int i = 0; i < num_entries; i++) {
4765 		sbuf_printf(buf,
4766 		    "%#4x | %10d   %5d   %6d   %12d",
4767 		    resp[i].resource_type,
4768 		    resp[i].guaranteed,
4769 		    resp[i].total,
4770 		    resp[i].used,
4771 		    resp[i].total_unalloced);
4772 		if (i < num_entries - 1)
4773 			sbuf_cat(buf, "\n");
4774 	}
4775 
4776 	error = sbuf_finish(buf);
4777 	if (error) {
4778 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4779 		sbuf_delete(buf);
4780 		return error;
4781 	}
4782 
4783 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4784 	if (error)
4785 		device_printf(dev, "sysctl error: %d\n", error);
4786 	sbuf_delete(buf);
4787 	return error;
4788 }
4789 
4790 /*
4791 ** Caller must init and delete sbuf; this function will clear and
4792 ** finish it for caller.
4793 */
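     /*
     ** Example decodes per the SEID ranges below: 3 -> "MAC 1",
     ** 17 -> "PF 1", 40 -> "VF 8", 530 -> "VSI 18".
     */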
4794 static char *
4795 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4796 {
4797 	sbuf_clear(s);
4798 
4799 	if (seid == 0 && uplink)
4800 		sbuf_cat(s, "Network");
4801 	else if (seid == 0)
4802 		sbuf_cat(s, "Host");
4803 	else if (seid == 1)
4804 		sbuf_cat(s, "EMP");
4805 	else if (seid <= 5)
4806 		sbuf_printf(s, "MAC %d", seid - 2);
4807 	else if (seid <= 15)
4808 		sbuf_cat(s, "Reserved");
4809 	else if (seid <= 31)
4810 		sbuf_printf(s, "PF %d", seid - 16);
4811 	else if (seid <= 159)
4812 		sbuf_printf(s, "VF %d", seid - 32);
4813 	else if (seid <= 287)
4814 		sbuf_cat(s, "Reserved");
4815 	else if (seid <= 511)
4816 		sbuf_cat(s, "Other"); // for other structures
4817 	else if (seid <= 895)
4818 		sbuf_printf(s, "VSI %d", seid - 512);
4819 	else if (seid <= 1023)
4820 		sbuf_printf(s, "Reserved");
4821 	else
4822 		sbuf_cat(s, "Invalid");
4823 
4824 	sbuf_finish(s);
4825 	return sbuf_data(s);
4826 }
4827 
4828 static int
4829 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4830 {
4831 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4832 	struct i40e_hw *hw = &pf->hw;
4833 	device_t dev = pf->dev;
4834 	struct sbuf *buf;
4835 	struct sbuf *nmbuf;
4836 	int error = 0;
4837 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4838 
4839 	u16 next = 0;
4840 	struct i40e_aqc_get_switch_config_resp *sw_config;
4841 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4842 
4843 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4844 	if (!buf) {
4845 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4846 		return (ENOMEM);
4847 	}
4848 
4849 	error = i40e_aq_get_switch_config(hw, sw_config,
4850 	    sizeof(aq_buf), &next, NULL);
4851 	if (error) {
4852 		device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4853 		    __func__, error, hw->aq.asq_last_status);
4854 		sbuf_delete(buf);
4855 		return error;
4856 	}
4857 
4858 	nmbuf = sbuf_new_auto();
4859 	if (!nmbuf) {
4860 		device_printf(dev, "Could not allocate sbuf for name output.\n");
     		sbuf_delete(buf);
4861 		return (ENOMEM);
4862 	}
4863 
4864 	sbuf_cat(buf, "\n");
4865 	// Assuming <= 255 elements in switch
4866 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4867 	/* Exclude:
4868 	** Revision -- all elements are revision 1 for now
4869 	*/
4870 	sbuf_printf(buf,
4871 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4872 	    "                |          |          | (uplink)\n");
4873 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4874 		// "%4d (%8s) | %8s   %8s   %#8x",
4875 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4876 		sbuf_cat(buf, " ");
4877 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4878 		sbuf_cat(buf, " | ");
4879 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4880 		sbuf_cat(buf, "   ");
4881 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4882 		sbuf_cat(buf, "   ");
4883 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4884 		if (i < sw_config->header.num_reported - 1)
4885 			sbuf_cat(buf, "\n");
4886 	}
4887 	sbuf_delete(nmbuf);
4888 
4889 	error = sbuf_finish(buf);
4890 	if (error) {
4891 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4892 		sbuf_delete(buf);
4893 		return error;
4894 	}
4895 
4896 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4897 	if (error)
4898 		device_printf(dev, "sysctl error: %d\n", error);
4899 	sbuf_delete(buf);
4900 
4901 	return (error);
4902 }
4903 
4904 /*
4905 ** Dump TX desc given index.
4906 ** Doesn't work; don't use.
4907 ** TODO: Also needs a queue index input!
4908 **/
4909 static int
4910 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4911 {
4912 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4913 	device_t dev = pf->dev;
4914 	struct sbuf *buf;
4915 	int error = 0;
4916 
4917 	int desc_idx = 0;	/* was u16; sysctl_handle_int operates on a full int */
4918 
4919 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4920 	if (!buf) {
4921 		device_printf(dev, "Could not allocate sbuf for output.\n");
4922 		return (ENOMEM);
4923 	}
4924 
4925 	/* Read in index */
4926 	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4927 	if (error) {
     		sbuf_delete(buf);	/* don't leak the sbuf on early return */
4928 		return (error);
     	}
4929 	if (req->newptr == NULL) {
     		sbuf_delete(buf);
4930 		return (EIO); // fix
     	}
4931 	if (desc_idx > 1024) { // fix
4932 		device_printf(dev,
4933 		    "Invalid descriptor index, needs to be < 1024\n"); // fix
     		sbuf_delete(buf);
4934 		return (EINVAL);
4935 	}
4936 
4937 	// Don't use this sysctl yet
4938 	if (TRUE) {
     		sbuf_delete(buf);
4939 		return (ENODEV);
     	}
4940 
4941 	sbuf_cat(buf, "\n");
4942 
4943 	// set to queue 1?
4944 	struct ixl_queue *que = pf->vsi.queues;
4945 	struct tx_ring *txr = &(que[1].txr);
4946 	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4947 
4948 	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4949 	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4950 	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4951 
4952 	error = sbuf_finish(buf);
4953 	if (error) {
4954 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4955 		sbuf_delete(buf);
4956 		return error;
4957 	}
4958 
4959 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4960 	if (error)
4961 		device_printf(dev, "sysctl error: %d\n", error);
4962 	sbuf_delete(buf);
4963 	return error;
4964 }
4965 #endif /* IXL_DEBUG_SYSCTL */
4966 
4967