/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.8";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixl_probe(device_t);
static int	ixl_attach(device_t);
static int	ixl_detach(device_t);
static int	ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void	ixl_stop(struct ixl_pf *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static int	ixl_media_change(struct ifnet *);
static void	ixl_update_link_status(struct ixl_pf *);
static int	ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_setup_vsi(struct ixl_vsi *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);

static void	ixl_enable_adminq(struct i40e_hw *);
static void	ixl_disable_adminq(struct i40e_hw *);
static void	ixl_enable_queue(struct i40e_hw *, int);
static void	ixl_disable_queue(struct i40e_hw *, int);
static void	ixl_enable_legacy(struct i40e_hw *);
static void	ixl_disable_legacy(struct i40e_hw *);

static void	ixl_set_promisc(struct ixl_vsi *);
static void	ixl_add_multi(struct ixl_vsi *);
static void	ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI-X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void	ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
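
/*
** Usage note (illustrative, not part of the original sources): built
** as a module this driver is if_ixl.ko, so it can typically be loaded
** at boot by adding if_ixl_load="YES" to /boot/loader.conf, or at
** runtime with "kldload if_ixl".
*/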

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "IXL driver parameters");

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");
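
/*
** Example (illustrative): CTLFLAG_RDTUN tunables like the ones in this
** file are read before attach, so they are normally set from
** /boot/loader.conf, e.g.:
**
**	hw.ixl.enable_msix="0"
**
** to force legacy/MSI interrupts for testing.
*/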

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based on
** the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
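
/*
** A rough note on units (assuming the usual i40e ITR convention of
** 2 usec granularity): the ITR value sets a minimum interval between
** interrupts, so IXL_ITR_8K caps a vector at roughly 8000 interrupts
** per second and IXL_ITR_4K at roughly 4000. The dynamic_*_itr knobs
** above let the driver re-tune these per queue at runtime.
*/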

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif


static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
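
/*
** These strings are indexed by enum i40e_fc_mode, which is what the
** "fc" sysctl installed in ixl_attach() reports and sets; e.g.
** (illustrative, assuming the usual enum ordering)
** "sysctl dev.ixl.0.fc=3" would request Full flow control.
*/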

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	u16		bus;
	int		error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* For now always do an initial CORE reset on first device */
	{
		static int	ixl_dev_count;
		static int	ixl_dev_track[32];
		u32		my_dev;
		int		i, found = FALSE;
		u16		bus = pci_get_bus(dev);

		mtx_lock(&ixl_reset_mtx);
507 
508 		for (i = 0; i < ixl_dev_count; i++) {
509 			if (ixl_dev_track[i] == my_dev)
510 				found = TRUE;
511 		}
512 
513                 if (!found) {
514                         u32 reg;
515 
516                         ixl_dev_track[ixl_dev_count] = my_dev;
517                         ixl_dev_count++;
518 
519 			INIT_DEBUGOUT("Initial CORE RESET\n");
520                         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
521                         ixl_flush(hw);
522                         i = 50;
523                         do {
524 				i40e_msec_delay(50);
525                                 reg = rd32(hw, I40E_GLGEN_RSTAT);
526                                 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
527                                         break;
528                         } while (i--);
529 
530                         /* paranoia */
531                         wr32(hw, I40E_PF_ATQLEN, 0);
532                         wr32(hw, I40E_PF_ATQBAL, 0);
533                         wr32(hw, I40E_PF_ATQBAH, 0);
534                         i40e_clear_pxe_mode(hw);
535                 }
536                 mtx_unlock(&ixl_reset_mtx);
537 	}
538 
539 	/* Set admin queue parameters */
540 	hw->aq.num_arq_entries = IXL_AQ_LEN;
541 	hw->aq.num_asq_entries = IXL_AQ_LEN;
542 	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
543 	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
544 
545 	/* Initialize the shared code */
546 	error = i40e_init_shared_code(hw);
547 	if (error) {
548 		device_printf(dev,"Unable to initialize the shared code\n");
549 		error = EIO;
550 		goto err_out;
551 	}
552 
553 	/* Set up the admin queue */
554 	error = i40e_init_adminq(hw);
555 	if (error) {
556 		device_printf(dev, "The driver for the device stopped "
557 		    "because the NVM image is newer than expected.\n"
558 		    "You must install the most recent version of "
559 		    " the network driver.\n");
560 		goto err_out;
561 	}
562 	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
563 
564         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
565 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
566 		device_printf(dev, "The driver for the device detected "
567 		    "a newer version of the NVM image than expected.\n"
568 		    "Please install the most recent version of the network driver.\n");
569 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
570 	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
571 		device_printf(dev, "The driver for the device detected "
572 		    "an older version of the NVM image than expected.\n"
573 		    "Please update the NVM image.\n");
574 
575 	/* Clear PXE mode */
576 	i40e_clear_pxe_mode(hw);
577 
578 	/* Get capabilities from the device */
579 	error = ixl_get_hw_capabilities(pf);
580 	if (error) {
581 		device_printf(dev, "HW capabilities failure!\n");
582 		goto err_get_cap;
583 	}
584 
585 	/* Set up host memory cache */
586 	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
587 	if (error) {
588 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
589 		goto err_get_cap;
590 	}
591 
592 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
593 	if (error) {
594 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
595 		goto err_mac_hmc;
596 	}
597 
598 	/* Disable LLDP from the firmware */
599 	i40e_aq_stop_lldp(hw, TRUE, NULL);
600 
601 	i40e_get_mac_addr(hw, hw->mac.addr);
602 	error = i40e_validate_mac_addr(hw->mac.addr);
603 	if (error) {
604 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
605 		goto err_mac_hmc;
606 	}
607 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
608 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
609 
610 	/* Set up VSI and queues */
611 	if (ixl_setup_stations(pf) != 0) {
612 		device_printf(dev, "setup stations failed!\n");
613 		error = ENOMEM;
614 		goto err_mac_hmc;
615 	}
616 
617 	/* Initialize mac filter list for VSI */
618 	SLIST_INIT(&vsi->ftl);
619 
620 	/* Set up interrupt routing here */
621 	if (pf->msix > 1)
622 		error = ixl_assign_vsi_msix(pf);
623 	else
624 		error = ixl_assign_vsi_legacy(pf);
625 	if (error)
626 		goto err_late;
627 
628 	i40e_msec_delay(75);
629 	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
630 	if (error) {
631 		device_printf(dev, "link restart failed, aq_err=%d\n",
632 		    pf->hw.aq.asq_last_status);
633 	}
634 
635 	/* Determine link state */
636 	vsi->link_up = ixl_config_link(hw);
637 
638 	/* Report if Unqualified modules are found */
639 	if ((vsi->link_up == FALSE) &&
640 	    (pf->hw.phy.link_info.link_info &
641 	    I40E_AQ_MEDIA_AVAILABLE) &&
642 	    (!(pf->hw.phy.link_info.an_info &
643 	    I40E_AQ_QUALIFIED_MODULE)))
644 		device_printf(dev, "Link failed because "
645 		    "an unqualified module was detected\n");
646 
647 	/* Setup OS specific network interface */
648 	if (ixl_setup_interface(dev, vsi) != 0) {
649 		device_printf(dev, "interface setup failed!\n");
650 		error = EIO;
651 		goto err_late;
652 	}
653 
654 	/* Get the bus configuration and set the shared code */
655 	bus = ixl_get_bus_info(hw, dev);
656 	i40e_set_pci_config_data(hw, bus);
657 
658 	/* Initialize statistics */
659 	ixl_pf_reset_stats(pf);
660 	ixl_update_stats_counters(pf);
661 	ixl_add_hw_stats(pf);
662 
663 	/* Reset port's advertised speeds */
664 	if (!i40e_is_40G_device(hw->device_id)) {
665 		pf->advertised_speed = 0x7;
666 		ixl_set_advertised_speeds(pf, 0x7);
667 	}
668 
669 	/* Register for VLAN events */
670 	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
671 	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
672 	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
673 	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
674 
675 
676 	INIT_DEBUGOUT("ixl_attach: end");
677 	return (0);
678 
679 err_late:
680 	if (vsi->ifp != NULL)
681 		if_free(vsi->ifp);
682 err_mac_hmc:
683 	i40e_shutdown_lan_hmc(hw);
684 err_get_cap:
685 	i40e_shutdown_adminq(hw);
686 err_out:
687 	ixl_free_pci_resources(pf);
688 	ixl_free_vsi(vsi);
689 	IXL_PF_LOCK_DESTROY(pf);
690 	return (error);
691 }
692 
693 /*********************************************************************
694  *  Device removal routine
695  *
696  *  The detach entry point is called when the driver is being removed.
697  *  This routine stops the adapter and deallocates all the resources
698  *  that were allocated for driver operation.
699  *
700  *  return 0 on success, positive on failure
701  *********************************************************************/
702 
703 static int
704 ixl_detach(device_t dev)
705 {
706 	struct ixl_pf		*pf = device_get_softc(dev);
707 	struct i40e_hw		*hw = &pf->hw;
708 	struct ixl_vsi		*vsi = &pf->vsi;
709 	struct ixl_queue	*que = vsi->queues;
710 	i40e_status		status;
711 
712 	INIT_DEBUGOUT("ixl_detach: begin");
713 
714 	/* Make sure VLANS are not using driver */
715 	if (vsi->ifp->if_vlantrunk != NULL) {
716 		device_printf(dev,"Vlan in use, detach first\n");
717 		return (EBUSY);
718 	}
719 
720 	IXL_PF_LOCK(pf);
721 	ixl_stop(pf);
722 	IXL_PF_UNLOCK(pf);
723 
724 	for (int i = 0; i < vsi->num_queues; i++, que++) {
725 		if (que->tq) {
726 			taskqueue_drain(que->tq, &que->task);
727 			taskqueue_drain(que->tq, &que->tx_task);
728 			taskqueue_free(que->tq);
729 		}
730 	}
731 
732 	/* Shutdown LAN HMC */
733 	status = i40e_shutdown_lan_hmc(hw);
734 	if (status)
735 		device_printf(dev,
736 		    "Shutdown LAN HMC failed with code %d\n", status);
737 
738 	/* Shutdown admin queue */
739 	status = i40e_shutdown_adminq(hw);
740 	if (status)
741 		device_printf(dev,
742 		    "Shutdown Admin queue failed with code %d\n", status);
743 
744 	/* Unregister VLAN events */
745 	if (vsi->vlan_attach != NULL)
746 		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
747 	if (vsi->vlan_detach != NULL)
748 		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
749 
750 	ether_ifdetach(vsi->ifp);
751 	callout_drain(&pf->timer);
752 
753 
754 	ixl_free_pci_resources(pf);
755 	bus_generic_detach(dev);
756 	if_free(vsi->ifp);
757 	ixl_free_vsi(vsi);
758 	IXL_PF_LOCK_DESTROY(pf);
759 	return (0);
760 }
761 
762 /*********************************************************************
763  *
764  *  Shutdown entry point
765  *
766  **********************************************************************/
767 
768 static int
769 ixl_shutdown(device_t dev)
770 {
771 	struct ixl_pf *pf = device_get_softc(dev);
772 	IXL_PF_LOCK(pf);
773 	ixl_stop(pf);
774 	IXL_PF_UNLOCK(pf);
775 	return (0);
776 }
777 
778 
779 /*********************************************************************
780  *
781  *  Get the hardware capabilities
782  *
783  **********************************************************************/
784 
785 static int
786 ixl_get_hw_capabilities(struct ixl_pf *pf)
787 {
788 	struct i40e_aqc_list_capabilities_element_resp *buf;
789 	struct i40e_hw	*hw = &pf->hw;
790 	device_t 	dev = pf->dev;
791 	int             error, len;
792 	u16		needed;
793 	bool		again = TRUE;
794 
795 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
796 retry:
797 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
798 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
799 		device_printf(dev, "Unable to allocate cap memory\n");
800                 return (ENOMEM);
801 	}
802 
803 	/* This populates the hw struct */
804         error = i40e_aq_discover_capabilities(hw, buf, len,
805 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
806 	free(buf, M_DEVBUF);
807 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
808 	    (again == TRUE)) {
809 		/* retry once with a larger buffer */
810 		again = FALSE;
811 		len = needed;
812 		goto retry;
813 	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
814 		device_printf(dev, "capability discovery failed: %d\n",
815 		    pf->hw.aq.asq_last_status);
816 		return (ENODEV);
817 	}
818 
819 	/* Capture this PF's starting queue pair */
820 	pf->qbase = hw->func_caps.base_queue;
821 
822 #ifdef IXL_DEBUG
823 	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
824 	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
825 	    hw->pf_id, hw->func_caps.num_vfs,
826 	    hw->func_caps.num_msix_vectors,
827 	    hw->func_caps.num_msix_vectors_vf,
828 	    hw->func_caps.fd_filters_guaranteed,
829 	    hw->func_caps.fd_filters_best_effort,
830 	    hw->func_caps.num_tx_qp,
831 	    hw->func_caps.num_rx_qp,
832 	    hw->func_caps.base_queue);
833 #endif
834 	return (error);
835 }
836 
837 static void
838 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
839 {
840 	device_t 	dev = vsi->dev;
841 
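	/*
	** The four-way checks below keep one invariant: TSO is never
	** left enabled without TX checksum offload. The
	** IXL_FLAGS_KEEP_TSO flags remember when TSO was dropped only
	** to satisfy that dependency, so it is restored automatically
	** when checksum offload is re-enabled.
	*/
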
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LAA address "
			    "change failed!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Setup the VSI */
	ixl_setup_vsi(vsi);

	/*
	** Prepare the rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI-X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSI-X Interrupt Handlers and Tasklets
**
*/
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	struct ifnet	*ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet *ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections.
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
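	/*
	** Note (a sketch of the mechanism): writing INTENA together
	** with SWINT_TRIG to a queue's DYN_CTLN register fires a
	** software-triggered interrupt on that vector, forcing its
	** handler to run and clean up any stalled work.
	*/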
1703 
1704 	for (int i = 0; i < vsi->num_queues; i++,que++) {
1705 		/* Any queues with outstanding work get a sw irq */
1706 		if (que->busy)
1707 			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1708 		/*
1709 		** Each time txeof runs without cleaning, but there
1710 		** are uncleaned descriptors it increments busy. If
1711 		** we get to 5 we declare it hung.
1712 		*/
1713 		if (que->busy == IXL_QUEUE_HUNG) {
1714 			++hung;
1715 			/* Mark the queue as inactive */
1716 			vsi->active_queues &= ~((u64)1 << que->me);
1717 			continue;
1718 		} else {
1719 			/* Check if we've come back from hung */
1720 			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1721 				vsi->active_queues |= ((u64)1 << que->me);
1722 		}
1723 		if (que->busy >= IXL_MAX_TX_BUSY) {
1724 			device_printf(dev, "Warning: queue %d "
1725 			    "appears to be hung!\n", i);
1726 			que->busy = IXL_QUEUE_HUNG;
1727 			++hung;
1728 		}
1729 	}
1730 	/* Only reinit if all queues show hung */
1731 	if (hung == vsi->num_queues)
1732 		goto hung;
1733 
1734 	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1735 	return;
1736 
1737 hung:
1738 	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1739 	ixl_init_locked(pf);
1740 }
1741 
1742 /*
1743 ** Note: this routine updates the OS on the link state;
1744 **	the real check of the hardware only happens with
1745 **	a link interrupt.
1746 */
1747 static void
1748 ixl_update_link_status(struct ixl_pf *pf)
1749 {
1750 	struct ixl_vsi		*vsi = &pf->vsi;
1751 	struct i40e_hw		*hw = &pf->hw;
1752 	struct ifnet		*ifp = vsi->ifp;
1753 	device_t		dev = pf->dev;
1754 	enum i40e_fc_mode 	fc;
1755 
1756 
1757 	if (vsi->link_up) {
1758 		if (vsi->link_active == FALSE) {
1759 			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1760 			if (bootverbose) {
1761 				fc = hw->fc.current_mode;
1762 				device_printf(dev, "Link is up %d Gbps %s,"
1763 				    " Flow Control: %s\n",
1764 				    ((vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10),
1765 				    "Full Duplex", ixl_fc_string[fc]);
1766 			}
1766 			}
1767 			vsi->link_active = TRUE;
1768 			if_link_state_change(ifp, LINK_STATE_UP);
1769 		}
1770 	} else { /* Link down */
1771 		if (vsi->link_active == TRUE) {
1772 			if (bootverbose)
1773 				device_printf(dev, "Link is Down\n");
1774 			if_link_state_change(ifp, LINK_STATE_DOWN);
1775 			vsi->link_active = FALSE;
1776 		}
1777 	}
1778 
1779 	return;
1780 }
1781 
1782 /*********************************************************************
1783  *
1784  *  This routine stops all traffic on the adapter: it disables
1785  *  interrupts and the RX/TX rings, and halts the local timer.
1786  *
1787  **********************************************************************/
1788 
1789 static void
1790 ixl_stop(struct ixl_pf *pf)
1791 {
1792 	struct ixl_vsi	*vsi = &pf->vsi;
1793 	struct ifnet	*ifp = vsi->ifp;
1794 
1795 	mtx_assert(&pf->pf_mtx, MA_OWNED);
1796 
1797 	INIT_DEBUGOUT("ixl_stop: begin\n");
1798 	ixl_disable_intr(vsi);
1799 	ixl_disable_rings(vsi);
1800 
1801 	/* Tell the stack that the interface is no longer active */
1802 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1803 
1804 	/* Stop the local timer */
1805 	callout_stop(&pf->timer);
1806 
1807 	return;
1808 }
1809 
1810 
1811 /*********************************************************************
1812  *
1813  *  Setup Legacy or MSI interrupt resources and handler for the VSI
1814  *
1815  **********************************************************************/
1816 static int
1817 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1818 {
1819 	device_t        dev = pf->dev;
1820 	struct 		ixl_vsi *vsi = &pf->vsi;
1821 	struct		ixl_queue *que = vsi->queues;
1822 	int 		error, rid = 0;
1823 
1824 	if (pf->msix == 1)
1825 		rid = 1;
1826 	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1827 	    &rid, RF_SHAREABLE | RF_ACTIVE);
1828 	if (pf->res == NULL) {
1829 		device_printf(dev,"Unable to allocate"
1830 		    " bus resource: vsi legacy/msi interrupt\n");
1831 		return (ENXIO);
1832 	}
1833 
1834 	/* Set the handler function */
1835 	error = bus_setup_intr(dev, pf->res,
1836 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1837 	    ixl_intr, pf, &pf->tag);
1838 	if (error) {
1839 		pf->res = NULL;
1840 		device_printf(dev, "Failed to register legacy/msi handler\n");
1841 		return (error);
1842 	}
1843 	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1844 	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1845 	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1846 	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1847 	    taskqueue_thread_enqueue, &que->tq);
1848 	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1849 	    device_get_nameunit(dev));
1850 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1851 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1852 	    taskqueue_thread_enqueue, &pf->tq);
1853 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1854 	    device_get_nameunit(dev));
1855 
1856 	return (0);
1857 }
1858 
1859 
1860 /*********************************************************************
1861  *
1862  *  Setup MSIX Interrupt resources and handlers for the VSI
1863  *
1864  **********************************************************************/
1865 static int
1866 ixl_assign_vsi_msix(struct ixl_pf *pf)
1867 {
1868 	device_t	dev = pf->dev;
1869 	struct 		ixl_vsi *vsi = &pf->vsi;
1870 	struct 		ixl_queue *que = vsi->queues;
1871 	struct		tx_ring	 *txr;
1872 	int 		error, rid, vector = 0;
1873 
1874 	/* Admin Queue is vector 0 */
1875 	rid = vector + 1;
1876 	pf->res = bus_alloc_resource_any(dev,
1877 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1878 	if (!pf->res) {
1879 		device_printf(dev, "Unable to allocate"
1880 		    " bus resource: Adminq interrupt [%d]\n", rid);
1881 		return (ENXIO);
1882 	}
1883 	/* Set the adminq vector and handler */
1884 	error = bus_setup_intr(dev, pf->res,
1885 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1886 	    ixl_msix_adminq, pf, &pf->tag);
1887 	if (error) {
1888 		pf->res = NULL;
1889 		device_printf(dev, "Failed to register Admin queue handler\n");
1890 		return (error);
1891 	}
1892 	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1893 	pf->admvec = vector;
1894 	/* Tasklet for Admin Queue */
1895 	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1896 	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1897 	    taskqueue_thread_enqueue, &pf->tq);
1898 	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1899 	    device_get_nameunit(pf->dev));
1900 	++vector;
1901 
1902 	/* Now set up the stations */
1903 	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1904 		rid = vector + 1;
1905 		txr = &que->txr;
1906 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1907 		    RF_SHAREABLE | RF_ACTIVE);
1908 		if (que->res == NULL) {
1909 			device_printf(dev, "Unable to allocate"
1910 			    " bus resource: que interrupt [%d]\n", vector);
1911 			return (ENXIO);
1912 		}
1913 		/* Set the handler function */
1914 		error = bus_setup_intr(dev, que->res,
1915 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1916 		    ixl_msix_que, que, &que->tag);
1917 		if (error) {
1918 			que->res = NULL;
1919 			device_printf(dev, "Failed to register que handler\n");
1920 			return (error);
1921 		}
1922 		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1923 		/* Bind the vector to a CPU */
1924 		bus_bind_intr(dev, que->res, i);
1925 		que->msix = vector;
1926 		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1927 		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1928 		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1929 		    taskqueue_thread_enqueue, &que->tq);
1930 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1931 		    device_get_nameunit(pf->dev));
1932 	}
1933 
1934 	return (0);
1935 }
1936 
1937 
1938 /*
1939  * Allocate MSI/X vectors
1940  */
1941 static int
1942 ixl_init_msix(struct ixl_pf *pf)
1943 {
1944 	device_t dev = pf->dev;
1945 	int rid, want, vectors, queues, available;
1946 
1947 	/* Override by tuneable */
1948 	if (ixl_enable_msix == 0)
1949 		goto msi;
1950 
1951 	/*
1952 	** When used in a virtualized environment the
1953 	** PCI BUSMASTER capability may not be set,
1954 	** so explicitly set it here and rewrite
1955 	** the ENABLE bit in the MSIX control register
1956 	** at this point to allow the host to
1957 	** successfully initialize us.
1958 	*/
1959 	{
1960 		u16 pci_cmd_word;
1961 		int msix_ctrl;
1962 		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1963 		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1964 		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1965 		pci_find_cap(dev, PCIY_MSIX, &rid);
1966 		rid += PCIR_MSIX_CTRL;
1967 		msix_ctrl = pci_read_config(dev, rid, 2);
1968 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1969 		pci_write_config(dev, rid, msix_ctrl, 2);
1970 	}
1971 
1972 	/* First try MSI/X */
1973 	rid = PCIR_BAR(IXL_BAR);
1974 	pf->msix_mem = bus_alloc_resource_any(dev,
1975 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1976 	if (!pf->msix_mem) {
1977 		/* May not be enabled */
1978 		device_printf(pf->dev,
1979 		    "Unable to map MSIX table\n");
1980 		goto msi;
1981 	}
1982 
1983 	available = pci_msix_count(dev);
1984 	if (available == 0) { /* system has msix disabled */
1985 		bus_release_resource(dev, SYS_RES_MEMORY,
1986 		    rid, pf->msix_mem);
1987 		pf->msix_mem = NULL;
1988 		goto msi;
1989 	}
1990 
1991 	/* Figure out a reasonable auto config value */
1992 	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1993 
1994 	/* Override with hardcoded value if sane */
1995 	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1996 		queues = ixl_max_queues;
1997 
1998 	/*
1999 	** Want one vector (RX/TX pair) per queue
2000 	** plus an additional for the admin queue.
2001 	*/
2002 	want = queues + 1;
2003 	if (want <= available)	/* Have enough */
2004 		vectors = want;
2005 	else {
2006 		device_printf(pf->dev,
2007 		    "MSIX Configuration Problem, "
2008 		    "%d vectors available but %d wanted!\n",
2009 		    available, want);
2010 		return (0); /* Will go to Legacy setup */
2011 	}
2012 
2013 	if (pci_alloc_msix(dev, &vectors) == 0) {
2014 		device_printf(pf->dev,
2015 		    "Using MSIX interrupts with %d vectors\n", vectors);
2016 		pf->msix = vectors;
2017 		pf->vsi.num_queues = queues;
2018 		return (vectors);
2019 	}
2020 msi:
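	/*
	** MSIX is unavailable or disabled: fall back to a single
	** queue using MSI, or a legacy interrupt if MSI allocation
	** also fails.
	*/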
2021 	vectors = pci_msi_count(dev);
2022 	pf->vsi.num_queues = 1;
2023 	pf->msix = 1;
2024 	ixl_max_queues = 1;
2025 	ixl_enable_msix = 0;
2026 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2027 		device_printf(pf->dev, "Using an MSI interrupt\n");
2028 	else {
2029 		pf->msix = 0;
2030 		device_printf(pf->dev, "Using a Legacy interrupt\n");
2031 	}
2032 	return (vectors);
2033 }
2034 
2035 
2036 /*
2037  * Plumb MSI/X vectors
2038  */
2039 static void
2040 ixl_configure_msix(struct ixl_pf *pf)
2041 {
2042 	struct i40e_hw	*hw = &pf->hw;
2043 	struct ixl_vsi *vsi = &pf->vsi;
2044 	u32		reg;
2045 	u16		vector = 1;
2046 
2047 	/* First set up the adminq - vector 0 */
2048 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2049 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2050 
2051 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2052 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2053 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2054 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2055 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2056 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2057 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2058 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2059 
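	/* 0x7FF (IXL_QUEUE_EOL) marks the vector 0 cause list as
	   empty: no queue interrupts chain off the adminq vector */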
2060 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2061 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2062 
2063 	wr32(hw, I40E_PFINT_DYN_CTL0,
2064 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2065 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2066 
2067 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2068 
2069 	/* Next configure the queues */
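	/*
	** Each vector walks a linked list of interrupt causes:
	** LNKLSTN points at the RX queue, whose RQCTL chains to
	** the TX queue, whose TQCTL chains to the next RX queue;
	** the final TQCTL is terminated with IXL_QUEUE_EOL.
	*/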
2070 	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2071 		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2072 		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2073 
2074 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2075 		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2076 		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2077 		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2078 		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2079 		wr32(hw, I40E_QINT_RQCTL(i), reg);
2080 
2081 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2082 		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2083 		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2084 		    ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2085 		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2086 		if (i == (vsi->num_queues - 1))
2087 			reg |= (IXL_QUEUE_EOL
2088 			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2089 		wr32(hw, I40E_QINT_TQCTL(i), reg);
2090 	}
2091 }
2092 
2093 /*
2094  * Configure for MSI single vector operation
2095  */
2096 static void
2097 ixl_configure_legacy(struct ixl_pf *pf)
2098 {
2099 	struct i40e_hw	*hw = &pf->hw;
2100 	u32		reg;
2101 
2102 
2103 	wr32(hw, I40E_PFINT_ITR0(0), 0);
2104 	wr32(hw, I40E_PFINT_ITR0(1), 0);
2105 
2106 
2107 	/* Setup "other" causes */
2108 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2109 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2110 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2111 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2112 	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2113 	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2114 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2115 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2116 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2117 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2118 	    ;
2119 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2120 
2121 	/* SW_ITR_IDX = 0, but don't change INTENA */
2122 	wr32(hw, I40E_PFINT_DYN_CTL0,
2123 	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2124 	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2125 	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2126 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2127 
2128 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2129 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2130 
2131 	/* Associate the queue pair to the vector and enable the q int */
2132 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2133 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2134 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2135 	wr32(hw, I40E_QINT_RQCTL(0), reg);
2136 
2137 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2138 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2139 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2140 	wr32(hw, I40E_QINT_TQCTL(0), reg);
2141 
2142 	/* Next enable the queue pair */
2143 	reg = rd32(hw, I40E_QTX_ENA(0));
2144 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2145 	wr32(hw, I40E_QTX_ENA(0), reg);
2146 
2147 	reg = rd32(hw, I40E_QRX_ENA(0));
2148 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2149 	wr32(hw, I40E_QRX_ENA(0), reg);
2150 }
2151 
2152 
2153 /*
2154  * Set the Initial ITR state
2155  */
2156 static void
2157 ixl_configure_itr(struct ixl_pf *pf)
2158 {
2159 	struct i40e_hw		*hw = &pf->hw;
2160 	struct ixl_vsi		*vsi = &pf->vsi;
2161 	struct ixl_queue	*que = vsi->queues;
2162 
2163 	vsi->rx_itr_setting = ixl_rx_itr;
2164 	if (ixl_dynamic_rx_itr)
2165 		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2166 	vsi->tx_itr_setting = ixl_tx_itr;
2167 	if (ixl_dynamic_tx_itr)
2168 		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2169 
2170 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2171 		struct tx_ring	*txr = &que->txr;
2172 		struct rx_ring 	*rxr = &que->rxr;
2173 
2174 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2175 		    vsi->rx_itr_setting);
2176 		rxr->itr = vsi->rx_itr_setting;
2177 		rxr->latency = IXL_AVE_LATENCY;
2178 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2179 		    vsi->tx_itr_setting);
2180 		txr->itr = vsi->tx_itr_setting;
2181 		txr->latency = IXL_AVE_LATENCY;
2182 	}
2183 }
2184 
2185 
2186 static int
2187 ixl_allocate_pci_resources(struct ixl_pf *pf)
2188 {
2189 	int             rid;
2190 	device_t        dev = pf->dev;
2191 
2192 	rid = PCIR_BAR(0);
2193 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2194 	    &rid, RF_ACTIVE);
2195 
2196 	if (!(pf->pci_mem)) {
2197 		device_printf(dev,"Unable to allocate bus resource: memory\n");
2198 		return (ENXIO);
2199 	}
2200 
2201 	pf->osdep.mem_bus_space_tag =
2202 		rman_get_bustag(pf->pci_mem);
2203 	pf->osdep.mem_bus_space_handle =
2204 		rman_get_bushandle(pf->pci_mem);
2205 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2206 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2207 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2208 
2209 	pf->hw.back = &pf->osdep;
2210 
2211 	/*
2212 	** Now set up MSI or MSI/X; this should
2213 	** return the number of supported
2214 	** vectors (1 for MSI).
2215 	*/
2216 	pf->msix = ixl_init_msix(pf);
2217 	return (0);
2218 }
2219 
2220 static void
2221 ixl_free_pci_resources(struct ixl_pf * pf)
2222 {
2223 	struct ixl_vsi		*vsi = &pf->vsi;
2224 	struct ixl_queue	*que = vsi->queues;
2225 	device_t		dev = pf->dev;
2226 	int			rid, memrid;
2227 
2228 	memrid = PCIR_BAR(IXL_BAR);
2229 
2230 	/* We may get here before stations are set up */
2231 	if ((!ixl_enable_msix) || (que == NULL))
2232 		goto early;
2233 
2234 	/*
2235 	**  Release all msix VSI resources:
2236 	*/
2237 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2238 		rid = que->msix + 1;
2239 		if (que->tag != NULL) {
2240 			bus_teardown_intr(dev, que->res, que->tag);
2241 			que->tag = NULL;
2242 		}
2243 		if (que->res != NULL)
2244 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2245 	}
2246 
2247 early:
2248 	/* Clean the AdminQ interrupt last */
2249 	if (pf->admvec) /* we are doing MSIX */
2250 		rid = pf->admvec + 1;
2251 	else
2252 		rid = (pf->msix != 0) ? 1 : 0;
2253 
2254 	if (pf->tag != NULL) {
2255 		bus_teardown_intr(dev, pf->res, pf->tag);
2256 		pf->tag = NULL;
2257 	}
2258 	if (pf->res != NULL)
2259 		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2260 
2261 	if (pf->msix)
2262 		pci_release_msi(dev);
2263 
2264 	if (pf->msix_mem != NULL)
2265 		bus_release_resource(dev, SYS_RES_MEMORY,
2266 		    memrid, pf->msix_mem);
2267 
2268 	if (pf->pci_mem != NULL)
2269 		bus_release_resource(dev, SYS_RES_MEMORY,
2270 		    PCIR_BAR(0), pf->pci_mem);
2271 
2272 	return;
2273 }
2274 
2275 static void
2276 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2277 {
2278 	/* Display supported media types */
2279 	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2280 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2281 
2282 	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2283 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2284 
2285 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2286 	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2287 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2288 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2289 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2290 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2291 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2292 	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2293 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2294 
2295 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2296 	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2297 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2298 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2299 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2300 	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2301 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2302 }
2303 
2304 /*********************************************************************
2305  *
2306  *  Setup networking device structure and register an interface.
2307  *
2308  **********************************************************************/
2309 static int
2310 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2311 {
2312 	struct ifnet		*ifp;
2313 	struct i40e_hw		*hw = vsi->hw;
2314 	struct ixl_queue	*que = vsi->queues;
2315 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2316 	enum i40e_status_code aq_error = 0;
2317 
2318 	INIT_DEBUGOUT("ixl_setup_interface: begin");
2319 
2320 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2321 	if (ifp == NULL) {
2322 		device_printf(dev, "cannot allocate ifnet structure\n");
2323 		return (-1);
2324 	}
2325 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2326 	ifp->if_mtu = ETHERMTU;
2327 	ifp->if_baudrate = 4000000000;  // ??
2328 	ifp->if_init = ixl_init;
2329 	ifp->if_softc = vsi;
2330 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2331 	ifp->if_ioctl = ixl_ioctl;
2332 
2333 #if __FreeBSD_version >= 1100036
2334 	if_setgetcounterfn(ifp, ixl_get_counter);
2335 #endif
2336 
2337 	ifp->if_transmit = ixl_mq_start;
2338 
2339 	ifp->if_qflush = ixl_qflush;
2340 
2341 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2342 
2343 	vsi->max_frame_size =
2344 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2345 	    + ETHER_VLAN_ENCAP_LEN;
2346 
2347 	/*
2348 	 * Tell the upper layer(s) we support long frames.
2349 	 */
2350 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2351 
2352 	ifp->if_capabilities |= IFCAP_HWCSUM;
2353 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2354 	ifp->if_capabilities |= IFCAP_TSO;
2355 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2356 	ifp->if_capabilities |= IFCAP_LRO;
2357 
2358 	/* VLAN capabilities */
2359 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2360 			     |  IFCAP_VLAN_HWTSO
2361 			     |  IFCAP_VLAN_MTU
2362 			     |  IFCAP_VLAN_HWCSUM;
2363 	ifp->if_capenable = ifp->if_capabilities;
2364 
2365 	/*
2366 	** Don't turn this on by default: if vlans are
2367 	** created on another pseudo device (e.g. lagg)
2368 	** then vlan events are not passed through, breaking
2369 	** operation, but with HW FILTER off it works. If
2370 	** using vlans directly on the ixl driver you can
2371 	** enable this and get full hardware tag filtering.
2372 	*/
2373 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2374 
2375 	/*
2376 	 * Specify the media types supported by this adapter and register
2377 	 * callbacks to update media and link information
2378 	 */
2379 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2380 		     ixl_media_status);
2381 
2382 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2383 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2384 		/* Need delay to detect fiber correctly */
2385 		i40e_msec_delay(200);
2386 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2387 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2388 			device_printf(dev, "Unknown PHY type detected!\n");
2389 		else
2390 			ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2391 	} else if (aq_error) {
2392 		device_printf(dev, "Error getting supported media types, err %d,"
2393 		    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2394 	} else
2395 		ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2396 
2397 	/* Use autoselect media by default */
2398 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2399 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2400 
2401 	ether_ifattach(ifp, hw->mac.addr);
2402 
2403 	return (0);
2404 }
2405 
2406 static bool
2407 ixl_config_link(struct i40e_hw *hw)
2408 {
2409 	bool check;
2410 
2411 	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2412 	check = i40e_get_link_status(hw);
2413 #ifdef IXL_DEBUG
2414 	printf("Link is %s\n", check ? "up":"down");
2415 #endif
2416 	return (check);
2417 }
2418 
2419 /*********************************************************************
2420  *
2421  *  Initialize this VSI
2422  *
2423  **********************************************************************/
2424 static int
2425 ixl_setup_vsi(struct ixl_vsi *vsi)
2426 {
2427 	struct i40e_hw	*hw = vsi->hw;
2428 	device_t 	dev = vsi->dev;
2429 	struct i40e_aqc_get_switch_config_resp *sw_config;
2430 	struct i40e_vsi_context	ctxt;
2431 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2432 	int	ret = I40E_SUCCESS;
2433 	u16	next = 0;
2434 
2435 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2436 	ret = i40e_aq_get_switch_config(hw, sw_config,
2437 	    sizeof(aq_buf), &next, NULL);
2438 	if (ret) {
2439 		device_printf(dev,"aq_get_switch_config failed!!\n");
2440 		return (ret);
2441 	}
2442 #ifdef IXL_DEBUG
2443 	printf("Switch config: header reported: %d in structure, %d total\n",
2444     	    sw_config->header.num_reported, sw_config->header.num_total);
2445 	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2446 	    sw_config->element[0].element_type,
2447 	    sw_config->element[0].seid,
2448 	    sw_config->element[0].uplink_seid,
2449 	    sw_config->element[0].downlink_seid);
2450 #endif
2451 	/* Save off this important value */
2452 	vsi->seid = sw_config->element[0].seid;
2453 
2454 	memset(&ctxt, 0, sizeof(ctxt));
2455 	ctxt.seid = vsi->seid;
2456 	ctxt.pf_num = hw->pf_id;
2457 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2458 	if (ret) {
2459 		device_printf(dev,"get vsi params failed %x!!\n", ret);
2460 		return (ret);
2461 	}
2462 #ifdef IXL_DEBUG
2463 	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2464 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2465 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2466 	    ctxt.uplink_seid, ctxt.vsi_number,
2467 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2468 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2469 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2470 #endif
2471 	/*
2472 	** Set the queue and traffic class bits
2473 	**  - when multiple traffic classes are supported
2474 	**    this will need to be more robust.
2475 	*/
2476 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2477 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2478 	ctxt.info.queue_mapping[0] = 0;
2479 	ctxt.info.tc_mapping[0] = 0x0800;
2480 
2481 	/* Set VLAN receive stripping mode */
2482 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2483 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2484 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2485 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2486 	else
2487 	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2488 
2489 	/* Keep copy of VSI info in VSI for statistic counters */
2490 	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2491 
2492 	/* Reset VSI statistics */
2493 	ixl_vsi_reset_stats(vsi);
2494 	vsi->hw_filters_add = 0;
2495 	vsi->hw_filters_del = 0;
2496 
2497 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2498 	if (ret)
2499 		device_printf(dev,"update vsi params failed %x!!\n",
2500 		   hw->aq.asq_last_status);
2501 	return (ret);
2502 }
2503 
2504 
2505 /*********************************************************************
2506  *
2507  *  Initialize the VSI:  this handles contexts, which means things
2508  *  			 like the number of descriptors and buffer size;
2509  *			 the rings are also initialized here.
2510  *
2511  **********************************************************************/
2512 static int
2513 ixl_initialize_vsi(struct ixl_vsi *vsi)
2514 {
2515 	struct ixl_queue	*que = vsi->queues;
2516 	device_t		dev = vsi->dev;
2517 	struct i40e_hw		*hw = vsi->hw;
2518 	int			err = 0;
2519 
2520 
2521 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2522 		struct tx_ring		*txr = &que->txr;
2523 		struct rx_ring 		*rxr = &que->rxr;
2524 		struct i40e_hmc_obj_txq tctx;
2525 		struct i40e_hmc_obj_rxq rctx;
2526 		u32			txctl;
2527 		u16			size;
2528 
2529 
2530 		/* Setup the HMC TX Context  */
2531 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2532 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2533 		tctx.new_context = 1;
2534 		tctx.base = (txr->dma.pa/128);
2535 		tctx.qlen = que->num_desc;
2536 		tctx.fc_ena = 0;
2537 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2538 		/* Enable HEAD writeback */
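		/*
		** With head writeback the HW periodically DMAs the TX
		** head index into host memory just past the last
		** descriptor, so txeof can track cleanup progress
		** without an expensive register read.
		*/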
2539 		tctx.head_wb_ena = 1;
2540 		tctx.head_wb_addr = txr->dma.pa +
2541 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2542 		tctx.rdylist_act = 0;
2543 		err = i40e_clear_lan_tx_queue_context(hw, i);
2544 		if (err) {
2545 			device_printf(dev, "Unable to clear TX context\n");
2546 			break;
2547 		}
2548 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2549 		if (err) {
2550 			device_printf(dev, "Unable to set TX context\n");
2551 			break;
2552 		}
2553 		/* Associate the ring with this PF */
2554 		txctl = I40E_QTX_CTL_PF_QUEUE;
2555 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2556 		    I40E_QTX_CTL_PF_INDX_MASK);
2557 		wr32(hw, I40E_QTX_CTL(i), txctl);
2558 		ixl_flush(hw);
2559 
2560 		/* Do ring (re)init */
2561 		ixl_init_tx_ring(que);
2562 
2563 		/* Next setup the HMC RX Context  */
2564 		if (vsi->max_frame_size <= 2048)
2565 			rxr->mbuf_sz = MCLBYTES;
2566 		else
2567 			rxr->mbuf_sz = MJUMPAGESIZE;
2568 
2569 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2570 
2571 		/* Set up an RX context for the HMC */
2572 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2573 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2574 		/* ignore header split for now */
2575 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2576 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2577 		    vsi->max_frame_size : max_rxmax;
2578 		rctx.dtype = 0;
2579 		rctx.dsize = 1;	/* do 32byte descriptors */
2580 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2581 		rctx.base = (rxr->dma.pa/128);
2582 		rctx.qlen = que->num_desc;
2583 		rctx.tphrdesc_ena = 1;
2584 		rctx.tphwdesc_ena = 1;
2585 		rctx.tphdata_ena = 0;
2586 		rctx.tphhead_ena = 0;
2587 		rctx.lrxqthresh = 2;
2588 		rctx.crcstrip = 1;
2589 		rctx.l2tsel = 1;
2590 		rctx.showiv = 1;
2591 		rctx.fc_ena = 0;
2592 		rctx.prefena = 1;
2593 
2594 		err = i40e_clear_lan_rx_queue_context(hw, i);
2595 		if (err) {
2596 			device_printf(dev,
2597 			    "Unable to clear RX context %d\n", i);
2598 			break;
2599 		}
2600 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2601 		if (err) {
2602 			device_printf(dev, "Unable to set RX context %d\n", i);
2603 			break;
2604 		}
2605 		err = ixl_init_rx_ring(que);
2606 		if (err) {
2607 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2608 			break;
2609 		}
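		/* Clear the ring tail, then set it to the last
		   descriptor to hand the full ring to the hardware */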
2610 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2611 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2612 	}
2613 	return (err);
2614 }
2615 
2616 
2617 /*********************************************************************
2618  *
2619  *  Free all VSI structs.
2620  *
2621  **********************************************************************/
2622 void
2623 ixl_free_vsi(struct ixl_vsi *vsi)
2624 {
2625 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2626 	struct ixl_queue	*que = vsi->queues;
2627 	struct ixl_mac_filter *f;
2628 
2629 	/* Free station queues */
2630 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2631 		struct tx_ring *txr = &que->txr;
2632 		struct rx_ring *rxr = &que->rxr;
2633 
2634 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2635 			continue;
2636 		IXL_TX_LOCK(txr);
2637 		ixl_free_que_tx(que);
2638 		if (txr->base)
2639 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2640 		IXL_TX_UNLOCK(txr);
2641 		IXL_TX_LOCK_DESTROY(txr);
2642 
2643 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2644 			continue;
2645 		IXL_RX_LOCK(rxr);
2646 		ixl_free_que_rx(que);
2647 		if (rxr->base)
2648 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2649 		IXL_RX_UNLOCK(rxr);
2650 		IXL_RX_LOCK_DESTROY(rxr);
2651 
2652 	}
2653 	free(vsi->queues, M_DEVBUF);
2654 
2655 	/* Free VSI filter list */
2656 	while (!SLIST_EMPTY(&vsi->ftl)) {
2657 		f = SLIST_FIRST(&vsi->ftl);
2658 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2659 		free(f, M_DEVBUF);
2660 	}
2661 }
2662 
2663 
2664 /*********************************************************************
2665  *
2666  *  Allocate memory for the VSI (virtual station interface) and its
2667  *  associated queues, rings and the descriptors associated with each;
2668  *  called only once at attach.
2669  *
2670  **********************************************************************/
2671 static int
2672 ixl_setup_stations(struct ixl_pf *pf)
2673 {
2674 	device_t		dev = pf->dev;
2675 	struct ixl_vsi		*vsi;
2676 	struct ixl_queue	*que;
2677 	struct tx_ring		*txr;
2678 	struct rx_ring		*rxr;
2679 	int 			rsize, tsize;
2680 	int			error = I40E_SUCCESS;
2681 
2682 	vsi = &pf->vsi;
2683 	vsi->back = (void *)pf;
2684 	vsi->hw = &pf->hw;
2685 	vsi->id = 0;
2686 	vsi->num_vlans = 0;
2687 
2688 	/* Get memory for the station queues */
2689 	if (!(vsi->queues =
2690 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2691 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2692 		device_printf(dev, "Unable to allocate queue memory\n");
2693 		error = ENOMEM;
2694 		goto early;
2695 	}
2696 
2697 	for (int i = 0; i < vsi->num_queues; i++) {
2698 		que = &vsi->queues[i];
2699 		que->num_desc = ixl_ringsz;
2700 		que->me = i;
2701 		que->vsi = vsi;
2702 		/* mark the queue as active */
2703 		vsi->active_queues |= (u64)1 << que->me;
2704 		txr = &que->txr;
2705 		txr->que = que;
2706 		txr->tail = I40E_QTX_TAIL(que->me);
2707 
2708 		/* Initialize the TX lock */
2709 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2710 		    device_get_nameunit(dev), que->me);
2711 		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2712 		/* Create the TX descriptor ring */
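		/* The extra u32 reserves the head-writeback slot just
		   past the last descriptor (see ixl_initialize_vsi()) */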
2713 		tsize = roundup2((que->num_desc *
2714 		    sizeof(struct i40e_tx_desc)) +
2715 		    sizeof(u32), DBA_ALIGN);
2716 		if (i40e_allocate_dma_mem(&pf->hw,
2717 		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2718 			device_printf(dev,
2719 			    "Unable to allocate TX Descriptor memory\n");
2720 			error = ENOMEM;
2721 			goto fail;
2722 		}
2723 		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2724 		bzero((void *)txr->base, tsize);
2725 		/* Now allocate transmit soft structs for the ring */
2726 		if (ixl_allocate_tx_data(que)) {
2727 			device_printf(dev,
2728 			    "Critical Failure setting up TX structures\n");
2729 			error = ENOMEM;
2730 			goto fail;
2731 		}
2732 		/* Allocate a buf ring */
2733 		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2734 		    M_WAITOK, &txr->mtx);
2735 		if (txr->br == NULL) {
2736 			device_printf(dev,
2737 			    "Critical Failure setting up TX buf ring\n");
2738 			error = ENOMEM;
2739 			goto fail;
2740 		}
2741 
2742 		/*
2743 		 * Next the RX queues...
2744 		 */
2745 		rsize = roundup2(que->num_desc *
2746 		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2747 		rxr = &que->rxr;
2748 		rxr->que = que;
2749 		rxr->tail = I40E_QRX_TAIL(que->me);
2750 
2751 		/* Initialize the RX side lock */
2752 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2753 		    device_get_nameunit(dev), que->me);
2754 		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2755 
2756 		if (i40e_allocate_dma_mem(&pf->hw,
2757 		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2758 			device_printf(dev,
2759 			    "Unable to allocate RX Descriptor memory\n");
2760 			error = ENOMEM;
2761 			goto fail;
2762 		}
2763 		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2764 		bzero((void *)rxr->base, rsize);
2765 
2766 		/* Allocate receive soft structs for the ring */
2767 		if (ixl_allocate_rx_data(que)) {
2768 			device_printf(dev,
2769 			    "Critical Failure setting up receive structs\n");
2770 			error = ENOMEM;
2771 			goto fail;
2772 		}
2773 	}
2774 
2775 	return (0);
2776 
2777 fail:
2778 	for (int i = 0; i < vsi->num_queues; i++) {
2779 		que = &vsi->queues[i];
2780 		rxr = &que->rxr;
2781 		txr = &que->txr;
2782 		if (rxr->base)
2783 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2784 		if (txr->base)
2785 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2786 	}
2787 
2788 early:
2789 	return (error);
2790 }
2791 
2792 /*
2793 ** Provide an update to the queue RX
2794 ** interrupt moderation value.
2795 */
2796 static void
2797 ixl_set_queue_rx_itr(struct ixl_queue *que)
2798 {
2799 	struct ixl_vsi	*vsi = que->vsi;
2800 	struct i40e_hw	*hw = vsi->hw;
2801 	struct rx_ring	*rxr = &que->rxr;
2802 	u16		rx_itr;
2803 	u16		rx_latency = 0;
2804 	int		rx_bytes;
2805 
2806 
2807 	/* Idle, do nothing */
2808 	if (rxr->bytes == 0)
2809 		return;
2810 
2811 	if (ixl_dynamic_rx_itr) {
2812 		rx_bytes = rxr->bytes/rxr->itr;
2813 		rx_itr = rxr->itr;
2814 
2815 		/* Adjust latency range */
2816 		switch (rxr->latency) {
2817 		case IXL_LOW_LATENCY:
2818 			if (rx_bytes > 10) {
2819 				rx_latency = IXL_AVE_LATENCY;
2820 				rx_itr = IXL_ITR_20K;
2821 			}
2822 			break;
2823 		case IXL_AVE_LATENCY:
2824 			if (rx_bytes > 20) {
2825 				rx_latency = IXL_BULK_LATENCY;
2826 				rx_itr = IXL_ITR_8K;
2827 			} else if (rx_bytes <= 10) {
2828 				rx_latency = IXL_LOW_LATENCY;
2829 				rx_itr = IXL_ITR_100K;
2830 			}
2831 			break;
2832 		case IXL_BULK_LATENCY:
2833 			if (rx_bytes <= 20) {
2834 				rx_latency = IXL_AVE_LATENCY;
2835 				rx_itr = IXL_ITR_20K;
2836 			}
2837 			break;
2838 		}
2839 
2840 		rxr->latency = rx_latency;
2841 
2842 		if (rx_itr != rxr->itr) {
2843 			/* do an exponential smoothing */
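			/*
			** new = (10 * target * old) / (9 * target + old):
			** a weighted blend that favors the old value, so
			** the ITR converges gradually toward the computed
			** target instead of jumping.
			*/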
2844 			rx_itr = (10 * rx_itr * rxr->itr) /
2845 			    ((9 * rx_itr) + rxr->itr);
2846 			rxr->itr = rx_itr & IXL_MAX_ITR;
2847 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2848 			    que->me), rxr->itr);
2849 		}
2850 	} else { /* We may have toggled to non-dynamic */
2851 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2852 			vsi->rx_itr_setting = ixl_rx_itr;
2853 		/* Update the hardware if needed */
2854 		if (rxr->itr != vsi->rx_itr_setting) {
2855 			rxr->itr = vsi->rx_itr_setting;
2856 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2857 			    que->me), rxr->itr);
2858 		}
2859 	}
2860 	rxr->bytes = 0;
2861 	rxr->packets = 0;
2862 	return;
2863 }
2864 
2865 
2866 /*
2867 ** Provide an update to the queue TX
2868 ** interrupt moderation value.
2869 */
2870 static void
2871 ixl_set_queue_tx_itr(struct ixl_queue *que)
2872 {
2873 	struct ixl_vsi	*vsi = que->vsi;
2874 	struct i40e_hw	*hw = vsi->hw;
2875 	struct tx_ring	*txr = &que->txr;
2876 	u16		tx_itr;
2877 	u16		tx_latency = 0;
2878 	int		tx_bytes;
2879 
2880 
2881 	/* Idle, do nothing */
2882 	if (txr->bytes == 0)
2883 		return;
2884 
2885 	if (ixl_dynamic_tx_itr) {
2886 		tx_bytes = txr->bytes/txr->itr;
2887 		tx_itr = txr->itr;
2888 
2889 		switch (txr->latency) {
2890 		case IXL_LOW_LATENCY:
2891 			if (tx_bytes > 10) {
2892 				tx_latency = IXL_AVE_LATENCY;
2893 				tx_itr = IXL_ITR_20K;
2894 			}
2895 			break;
2896 		case IXL_AVE_LATENCY:
2897 			if (tx_bytes > 20) {
2898 				tx_latency = IXL_BULK_LATENCY;
2899 				tx_itr = IXL_ITR_8K;
2900 			} else if (tx_bytes <= 10) {
2901 				tx_latency = IXL_LOW_LATENCY;
2902 				tx_itr = IXL_ITR_100K;
2903 			}
2904 			break;
2905 		case IXL_BULK_LATENCY:
2906 			if (tx_bytes <= 20) {
2907 				tx_latency = IXL_AVE_LATENCY;
2908 				tx_itr = IXL_ITR_20K;
2909 			}
2910 			break;
2911 		}
2912 
2913 		txr->latency = tx_latency;
2914 
2915 		if (tx_itr != txr->itr) {
2916 			/* do an exponential smoothing */
2917 			tx_itr = (10 * tx_itr * txr->itr) /
2918 			    ((9 * tx_itr) + txr->itr);
2919 			txr->itr = tx_itr & IXL_MAX_ITR;
2920 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2921 			    que->me), txr->itr);
2922 		}
2923 
2924 	} else { /* We may have toggled to non-dynamic */
2925 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2926 			vsi->tx_itr_setting = ixl_tx_itr;
2927 		/* Update the hardware if needed */
2928 		if (txr->itr != vsi->tx_itr_setting) {
2929 			txr->itr = vsi->tx_itr_setting;
2930 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2931 			    que->me), txr->itr);
2932 		}
2933 	}
2934 	txr->bytes = 0;
2935 	txr->packets = 0;
2936 	return;
2937 }
2938 
2939 
2940 static void
2941 ixl_add_hw_stats(struct ixl_pf *pf)
2942 {
2943 	device_t dev = pf->dev;
2944 	struct ixl_vsi *vsi = &pf->vsi;
2945 	struct ixl_queue *queues = vsi->queues;
2946 	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2947 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2948 
2949 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2950 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2951 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2952 
2953 	struct sysctl_oid *vsi_node, *queue_node;
2954 	struct sysctl_oid_list *vsi_list, *queue_list;
2955 
2956 	struct tx_ring *txr;
2957 	struct rx_ring *rxr;
2958 
2959 	/* Driver statistics */
2960 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2961 			CTLFLAG_RD, &pf->watchdog_events,
2962 			"Watchdog timeouts");
2963 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2964 			CTLFLAG_RD, &pf->admin_irq,
2965 			"Admin Queue IRQ Handled");
2966 
2967 	/* VSI statistics */
2968 #define QUEUE_NAME_LEN 32
2969 	char queue_namebuf[QUEUE_NAME_LEN];
2970 
2971 	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2972 	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2973 	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2974 				   CTLFLAG_RD, NULL, "VSI-specific stats");
2975 	vsi_list = SYSCTL_CHILDREN(vsi_node);
2976 
2977 	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2978 
2979 	/* Queue statistics */
2980 	for (int q = 0; q < vsi->num_queues; q++) {
2981 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2982 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2983 					     CTLFLAG_RD, NULL, "Queue #");
2984 		queue_list = SYSCTL_CHILDREN(queue_node);
2985 
2986 		txr = &(queues[q].txr);
2987 		rxr = &(queues[q].rxr);
2988 
2989 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2990 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2991 				"m_defrag() failed");
2992 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2993 				CTLFLAG_RD, &(queues[q].dropped_pkts),
2994 				"Driver dropped packets");
2995 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2996 				CTLFLAG_RD, &(queues[q].irqs),
2997 				"irqs on this queue");
2998 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2999 				CTLFLAG_RD, &(queues[q].tso),
3000 				"TSO");
3001 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3002 				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3003 				"Driver tx dma failure in xmit");
3004 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3005 				CTLFLAG_RD, &(txr->no_desc),
3006 				"Queue No Descriptor Available");
3007 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3008 				CTLFLAG_RD, &(txr->total_packets),
3009 				"Queue Packets Transmitted");
3010 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3011 				CTLFLAG_RD, &(txr->tx_bytes),
3012 				"Queue Bytes Transmitted");
3013 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3014 				CTLFLAG_RD, &(rxr->rx_packets),
3015 				"Queue Packets Received");
3016 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3017 				CTLFLAG_RD, &(rxr->rx_bytes),
3018 				"Queue Bytes Received");
3019 	}
3020 
3021 	/* MAC stats */
3022 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3023 }
3024 
3025 static void
3026 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3027 	struct sysctl_oid_list *child,
3028 	struct i40e_eth_stats *eth_stats)
3029 {
3030 	struct ixl_sysctl_info ctls[] =
3031 	{
3032 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3033 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3034 			"Unicast Packets Received"},
3035 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3036 			"Multicast Packets Received"},
3037 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3038 			"Broadcast Packets Received"},
3039 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3040 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3041 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3042 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3043 			"Multicast Packets Transmitted"},
3044 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3045 			"Broadcast Packets Transmitted"},
3046 		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3047 		// end
3048 		{0,0,0}
3049 	};
3050 
3051 	struct ixl_sysctl_info *entry = ctls;
3052 	while (entry->stat != 0)
3053 	{
3054 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3055 				CTLFLAG_RD, entry->stat,
3056 				entry->description);
3057 		entry++;
3058 	}
3059 }
3060 
3061 static void
3062 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3063 	struct sysctl_oid_list *child,
3064 	struct i40e_hw_port_stats *stats)
3065 {
3066 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3067 				    CTLFLAG_RD, NULL, "Mac Statistics");
3068 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3069 
3070 	struct i40e_eth_stats *eth_stats = &stats->eth;
3071 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3072 
3073 	struct ixl_sysctl_info ctls[] =
3074 	{
3075 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3076 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3077 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3078 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3079 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3080 		/* Packet Reception Stats */
3081 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3082 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3083 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3084 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3085 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3086 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3087 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3088 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3089 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3090 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3091 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3092 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3093 		/* Packet Transmission Stats */
3094 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3095 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3096 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3097 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3098 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3099 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3100 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3101 		/* Flow control */
3102 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3103 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3104 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3105 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3106 		/* End */
3107 		{0,0,0}
3108 	};
3109 
3110 	struct ixl_sysctl_info *entry = ctls;
3111 	while (entry->stat != 0)
3112 	{
3113 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3114 				CTLFLAG_RD, entry->stat,
3115 				entry->description);
3116 		entry++;
3117 	}
3118 }
3119 
3120 /*
3121 ** ixl_config_rss - setup RSS
3122 **  - note this is done for the single vsi
3123 */
3124 static void ixl_config_rss(struct ixl_vsi *vsi)
3125 {
3126 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3127 	struct i40e_hw	*hw = vsi->hw;
3128 	u32		lut = 0;
3129 	u64		set_hena, hena;
3130 	int		i, j;
3131 
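	/* An arbitrary but fixed key, so the RSS distribution is
	   stable across reboots */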
3132 	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3133 	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3134 	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3135 	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3136 
3137 	/* Fill out hash function seed */
3138 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3139 		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3140 
3141 	/* Enable PCTYPES for RSS: */
3142 	set_hena =
3143 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3144 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3145 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3146 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3147 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3148 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3149 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3150 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3151 		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3152 		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3153 		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3154 
3155 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3156 	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3157 	hena |= set_hena;
3158 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3159 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3160 
3161 	/* Populate the LUT with max no. of queues in round robin fashion */
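	/*
	** e.g. with 4 queues the LUT entries are 0,1,2,3,0,1,...;
	** each 32-bit HLUT register packs four consecutive 8-bit
	** entries.
	*/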
3162 	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3163 		if (j == vsi->num_queues)
3164 			j = 0;
3165 		/* lut = 4-byte sliding window of 4 lut entries */
3166 		lut = (lut << 8) | (j &
3167 		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3168 		/* On i = 3, we have 4 entries in lut; write to the register */
3169 		if ((i & 3) == 3)
3170 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3171 	}
3172 	ixl_flush(hw);
3173 }
3174 
3175 
3176 /*
3177 ** This routine is run via a vlan config EVENT;
3178 ** it enables us to use the HW Filter table since
3179 ** we can get the vlan id. This just creates the
3180 ** entry in the soft version of the VFTA; init will
3181 ** repopulate the real table.
3182 */
3183 static void
3184 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3185 {
3186 	struct ixl_vsi	*vsi = ifp->if_softc;
3187 	struct i40e_hw	*hw = vsi->hw;
3188 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3189 
3190 	if (ifp->if_softc !=  arg)   /* Not our event */
3191 		return;
3192 
3193 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3194 		return;
3195 
3196 	IXL_PF_LOCK(pf);
3197 	++vsi->num_vlans;
3198 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3199 	IXL_PF_UNLOCK(pf);
3200 }
3201 
3202 /*
3203 ** This routine is run via a vlan
3204 ** unconfig EVENT; it removes our entry
3205 ** from the soft vfta.
3206 */
3207 static void
3208 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3209 {
3210 	struct ixl_vsi	*vsi = ifp->if_softc;
3211 	struct i40e_hw	*hw = vsi->hw;
3212 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3213 
3214 	if (ifp->if_softc !=  arg)
3215 		return;
3216 
3217 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3218 		return;
3219 
3220 	IXL_PF_LOCK(pf);
3221 	--vsi->num_vlans;
3222 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3223 	IXL_PF_UNLOCK(pf);
3224 }
3225 
3226 /*
3227 ** This routine updates vlan filters; called by init,
3228 ** it scans the filter table and then updates the hw
3229 ** after a soft reset.
3230 */
3231 static void
3232 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3233 {
3234 	struct ixl_mac_filter	*f;
3235 	int			cnt = 0, flags;
3236 
3237 	if (vsi->num_vlans == 0)
3238 		return;
3239 	/*
3240 	** Scan the filter list for vlan entries,
3241 	** mark them for addition and then call
3242 	** for the AQ update.
3243 	*/
3244 	SLIST_FOREACH(f, &vsi->ftl, next) {
3245 		if (f->flags & IXL_FILTER_VLAN) {
3246 			f->flags |=
3247 			    (IXL_FILTER_ADD |
3248 			    IXL_FILTER_USED);
3249 			cnt++;
3250 		}
3251 	}
3252 	if (cnt == 0) {
3253 		printf("setup vlan: no filters found!\n");
3254 		return;
3255 	}
3256 	flags = IXL_FILTER_VLAN;
3257 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3258 	ixl_add_hw_filters(vsi, flags, cnt);
3259 	return;
3260 }
3261 
3262 /*
3263 ** Initialize filter list and add filters that the hardware
3264 ** needs to know about.
3265 */
3266 static void
3267 ixl_init_filters(struct ixl_vsi *vsi)
3268 {
3269 	/* Add broadcast address */
3270 	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3271 	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3272 }
3273 
3274 /*
3275 ** This routine adds multicast filters
3276 */
3277 static void
3278 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3279 {
3280 	struct ixl_mac_filter *f;
3281 
3282 	/* Does one already exist */
3283 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3284 	if (f != NULL)
3285 		return;
3286 
3287 	f = ixl_get_filter(vsi);
3288 	if (f == NULL) {
3289 		device_printf(vsi->dev, "WARNING: no filter available!!\n");
3290 		return;
3291 	}
3292 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3293 	f->vlan = IXL_VLAN_ANY;
3294 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3295 	    | IXL_FILTER_MC);
3296 
3297 	return;
3298 }
3299 
3300 /*
3301 ** This routine adds macvlan filters
3302 */
3303 static void
3304 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3305 {
3306 	struct ixl_mac_filter	*f, *tmp;
3307 	device_t		dev = vsi->dev;
3308 
3309 	DEBUGOUT("ixl_add_filter: begin");
3310 
3311 	/* Does one already exist */
3312 	f = ixl_find_filter(vsi, macaddr, vlan);
3313 	if (f != NULL)
3314 		return;
3315 	/*
3316 	** Is this the first vlan being registered? If so we
3317 	** need to remove the ANY filter that indicates we are
3318 	** not in a vlan, and replace that with a 0 filter.
3319 	*/
3320 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3321 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3322 		if (tmp != NULL) {
3323 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3324 			ixl_add_filter(vsi, macaddr, 0);
3325 		}
3326 	}
3327 
3328 	f = ixl_get_filter(vsi);
3329 	if (f == NULL) {
3330 		device_printf(dev, "WARNING: no filter available!!\n");
3331 		return;
3332 	}
3333 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3334 	f->vlan = vlan;
3335 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3336 	if (f->vlan != IXL_VLAN_ANY)
3337 		f->flags |= IXL_FILTER_VLAN;
3338 
3339 	ixl_add_hw_filters(vsi, f->flags, 1);
3340 	return;
3341 }
3342 
3343 static void
3344 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3345 {
3346 	struct ixl_mac_filter *f;
3347 
3348 	f = ixl_find_filter(vsi, macaddr, vlan);
3349 	if (f == NULL)
3350 		return;
3351 
3352 	f->flags |= IXL_FILTER_DEL;
3353 	ixl_del_hw_filters(vsi, 1);
3354 
3355 	/* Check if this is the last vlan removal */
3356 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3357 		/* Switch back to a non-vlan filter */
3358 		ixl_del_filter(vsi, macaddr, 0);
3359 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3360 	}
3361 	return;
3362 }
3363 
3364 /*
3365 ** Find the filter with both matching mac addr and vlan id
3366 */
3367 static struct ixl_mac_filter *
3368 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3369 {
3370 	struct ixl_mac_filter	*f;
3371 	bool			match = FALSE;
3372 
3373 	SLIST_FOREACH(f, &vsi->ftl, next) {
3374 		if (!cmp_etheraddr(f->macaddr, macaddr))
3375 			continue;
3376 		if (f->vlan == vlan) {
3377 			match = TRUE;
3378 			break;
3379 		}
3380 	}
3381 
3382 	if (!match)
3383 		f = NULL;
3384 	return (f);
3385 }
3386 
3387 /*
3388 ** This routine takes additions to the vsi filter
3389 ** table and creates an Admin Queue call to create
3390 ** the filters in the hardware.
3391 */
3392 static void
3393 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3394 {
3395 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3396 	struct ixl_mac_filter	*f;
3397 	struct i40e_hw	*hw = vsi->hw;
3398 	device_t	dev = vsi->dev;
3399 	int		err, j = 0;
3400 
3401 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3402 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3403 	if (a == NULL) {
3404 		device_printf(dev, "add hw filter failed to get memory\n");
3405 		return;
3406 	}
3407 
3408 	/*
3409 	** Scan the filter list; each time we find one
3410 	** we add it to the admin queue array and turn off
3411 	** the add bit.
3412 	*/
3413 	SLIST_FOREACH(f, &vsi->ftl, next) {
3414 		if (f->flags == flags) {
3415 			b = &a[j]; // a pox on fvl long names :)
3416 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3417 			b->vlan_tag =
3418 			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3419 			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3420 			f->flags &= ~IXL_FILTER_ADD;
3421 			j++;
3422 		}
3423 		if (j == cnt)
3424 			break;
3425 	}
3426 	if (j > 0) {
3427 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3428 		if (err)
3429 			device_printf(dev, "aq_add_macvlan failure %d\n",
3430 			    hw->aq.asq_last_status);
3431 		else
3432 			vsi->hw_filters_add += j;
3433 	}
3434 	free(a, M_DEVBUF);
3435 	return;
3436 }
3437 
3438 /*
3439 ** This routine takes removals in the vsi filter
3440 ** table and creates an Admin Queue call to delete
3441 ** the filters in the hardware.
3442 */
3443 static void
3444 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3445 {
3446 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3447 	struct i40e_hw		*hw = vsi->hw;
3448 	device_t		dev = vsi->dev;
3449 	struct ixl_mac_filter	*f, *f_temp;
3450 	int			err, j = 0;
3451 
3452 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3453 
3454 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3455 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3456 	if (d == NULL) {
3457 		device_printf(dev, "del hw filter failed to get memory\n");
3458 		return;
3459 	}
3460 
3461 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3462 		if (f->flags & IXL_FILTER_DEL) {
3463 			e = &d[j]; /* alias into the AQ element array */
3464 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3465 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3466 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3467 			/* delete entry from vsi list */
3468 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3469 			free(f, M_DEVBUF);
3470 			j++;
3471 		}
3472 		if (j == cnt)
3473 			break;
3474 	}
3475 	if (j > 0) {
3476 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3477 		/* NOTE: the firmware returns ENOENT for these deletes, but
3478 		   the removal still appears to work, so ignore that error. */
3479 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3480 			int sc = 0;
3481 			for (int i = 0; i < j; i++)
3482 				sc += (!d[i].error_code);
3483 			vsi->hw_filters_del += sc;
3484 			device_printf(dev,
3485 			    "Failed to remove %d/%d filters, aq error %d\n",
3486 			    j - sc, j, hw->aq.asq_last_status);
3487 		} else
3488 			vsi->hw_filters_del += j;
3489 	}
3490 	free(d, M_DEVBUF);
3491 
3492 	DEBUGOUT("ixl_del_hw_filters: end\n");
3493 	return;
3494 }
3495 
3496 
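/*
** Enable the TX and RX rings for each queue of the VSI: request
** the enable with QENA_REQ, then poll QENA_STAT (up to 10 x 10ms
** per ring) until the hardware reports the ring as running.
*/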
3497 static void
3498 ixl_enable_rings(struct ixl_vsi *vsi)
3499 {
3500 	struct i40e_hw	*hw = vsi->hw;
3501 	u32		reg;
3502 
3503 	for (int i = 0; i < vsi->num_queues; i++) {
3504 		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3505 
3506 		reg = rd32(hw, I40E_QTX_ENA(i));
3507 		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3508 		    I40E_QTX_ENA_QENA_STAT_MASK;
3509 		wr32(hw, I40E_QTX_ENA(i), reg);
3510 		/* Verify the enable took */
3511 		for (int j = 0; j < 10; j++) {
3512 			reg = rd32(hw, I40E_QTX_ENA(i));
3513 			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3514 				break;
3515 			i40e_msec_delay(10);
3516 		}
3517 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3518 			printf("TX queue %d disabled!\n", i);
3519 
3520 		reg = rd32(hw, I40E_QRX_ENA(i));
3521 		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3522 		    I40E_QRX_ENA_QENA_STAT_MASK;
3523 		wr32(hw, I40E_QRX_ENA(i), reg);
3524 		/* Verify the enable took */
3525 		for (int j = 0; j < 10; j++) {
3526 			reg = rd32(hw, I40E_QRX_ENA(i));
3527 			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3528 				break;
3529 			i40e_msec_delay(10);
3530 		}
3531 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3532 			printf("RX queue %d disabled!\n", i);
3533 	}
3534 }
3535 
3536 static void
3537 ixl_disable_rings(struct ixl_vsi *vsi)
3538 {
3539 	struct i40e_hw	*hw = vsi->hw;
3540 	u32		reg;
3541 
3542 	for (int i = 0; i < vsi->num_queues; i++) {
3543 		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3544 		i40e_usec_delay(500);
3545 
3546 		reg = rd32(hw, I40E_QTX_ENA(i));
3547 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3548 		wr32(hw, I40E_QTX_ENA(i), reg);
3549 		/* Verify the disable took */
3550 		for (int j = 0; j < 10; j++) {
3551 			reg = rd32(hw, I40E_QTX_ENA(i));
3552 			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3553 				break;
3554 			i40e_msec_delay(10);
3555 		}
3556 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3557 			printf("TX queue %d still enabled!\n", i);
3558 
3559 		reg = rd32(hw, I40E_QRX_ENA(i));
3560 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3561 		wr32(hw, I40E_QRX_ENA(i), reg);
3562 		/* Verify the disable took */
3563 		for (int j = 0; j < 10; j++) {
3564 			reg = rd32(hw, I40E_QRX_ENA(i));
3565 			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3566 				break;
3567 			i40e_msec_delay(10);
3568 		}
3569 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3570 			printf("RX queue %d still enabled!\n", i);
3571 	}
3572 }
3573 
3574 /**
3575  * ixl_handle_mdd_event
3576  *
3577  * Called from interrupt handler to identify possibly malicious vfs
3578  * (But also detects events from the PF, as well)
3579  **/
3580 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3581 {
3582 	struct i40e_hw *hw = &pf->hw;
3583 	device_t dev = pf->dev;
3584 	bool mdd_detected = false;
3585 	bool pf_mdd_detected = false;
3586 	u32 reg;
3587 
3588 	/* find what triggered the MDD event */
3589 	reg = rd32(hw, I40E_GL_MDET_TX);
3590 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3591 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3592 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3593 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3594 				I40E_GL_MDET_TX_EVENT_SHIFT;
3595 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3596 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3597 		device_printf(dev,
3598 			 "Malicious Driver Detection event 0x%02x"
3599 			 " on TX queue %d pf number 0x%02x\n",
3600 			 event, queue, pf_num);
3601 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3602 		mdd_detected = true;
3603 	}
3604 	reg = rd32(hw, I40E_GL_MDET_RX);
3605 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3606 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3607 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3608 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3609 				I40E_GL_MDET_RX_EVENT_SHIFT;
3610 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3611 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3612 		device_printf(dev,
3613 			 "Malicious Driver Detection event 0x%02x"
3614 			 " on RX queue %d of function 0x%02x\n",
3615 			 event, queue, func);
3616 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3617 		mdd_detected = true;
3618 	}
3619 
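	/* The event is valid; see if this PF's own queues triggered it */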
3620 	if (mdd_detected) {
3621 		reg = rd32(hw, I40E_PF_MDET_TX);
3622 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3623 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3624 			device_printf(dev,
3625 				 "MDD TX event is for this function 0x%08x",
3626 				 reg);
3627 			pf_mdd_detected = true;
3628 		}
3629 		reg = rd32(hw, I40E_PF_MDET_RX);
3630 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3631 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3632 			device_printf(dev,
3633 				 "MDD RX event is for this function 0x%08x",
3634 				 reg);
3635 			pf_mdd_detected = true;
3636 		}
3637 	}
3638 
3639 	/* re-enable mdd interrupt cause */
3640 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3641 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3642 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3643 	ixl_flush(hw);
3644 }
3645 
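/*
** Enable device interrupts: under MSIX the admin queue vector and
** each queue vector are armed individually; otherwise the single
** legacy/MSI vector is armed through DYN_CTL0.
*/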
3646 static void
3647 ixl_enable_intr(struct ixl_vsi *vsi)
3648 {
3649 	struct i40e_hw		*hw = vsi->hw;
3650 	struct ixl_queue	*que = vsi->queues;
3651 
3652 	if (ixl_enable_msix) {
3653 		ixl_enable_adminq(hw);
3654 		for (int i = 0; i < vsi->num_queues; i++, que++)
3655 			ixl_enable_queue(hw, que->me);
3656 	} else
3657 		ixl_enable_legacy(hw);
3658 }
3659 
3660 static void
3661 ixl_disable_intr(struct ixl_vsi *vsi)
3662 {
3663 	struct i40e_hw		*hw = vsi->hw;
3664 	struct ixl_queue	*que = vsi->queues;
3665 
3666 	if (ixl_enable_msix) {
3667 		ixl_disable_adminq(hw);
3668 		for (int i = 0; i < vsi->num_queues; i++, que++)
3669 			ixl_disable_queue(hw, que->me);
3670 	} else
3671 		ixl_disable_legacy(hw);
3672 }
3673 
3674 static void
3675 ixl_enable_adminq(struct i40e_hw *hw)
3676 {
3677 	u32		reg;
3678 
3679 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3680 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3681 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3682 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3683 	ixl_flush(hw);
3684 	return;
3685 }
3686 
3687 static void
3688 ixl_disable_adminq(struct i40e_hw *hw)
3689 {
3690 	u32		reg;
3691 
3692 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3693 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3694 
3695 	return;
3696 }
3697 
3698 static void
3699 ixl_enable_queue(struct i40e_hw *hw, int id)
3700 {
3701 	u32		reg;
3702 
3703 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3704 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3705 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3706 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3707 }
3708 
3709 static void
3710 ixl_disable_queue(struct i40e_hw *hw, int id)
3711 {
3712 	u32		reg;
3713 
3714 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3715 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3716 
3717 	return;
3718 }
3719 
3720 static void
3721 ixl_enable_legacy(struct i40e_hw *hw)
3722 {
3723 	u32		reg;
3724 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3725 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3726 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3727 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3728 }
3729 
3730 static void
3731 ixl_disable_legacy(struct i40e_hw *hw)
3732 {
3733 	u32		reg;
3734 
3735 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3736 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3737 
3738 	return;
3739 }
3740 
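/*
** Gather all of the per-port hardware statistics.  The device
** counters are not cleared by reset, so the first value read from
** each register is latched as an offset and subtracted from later
** readings (see ixl_stat_update32/48 below).  For example, if the
** first read of a counter returns 1000 it reports 0, and a later
** read of 1500 reports 500.
*/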
3741 static void
3742 ixl_update_stats_counters(struct ixl_pf *pf)
3743 {
3744 	struct i40e_hw	*hw = &pf->hw;
3745 	struct ixl_vsi *vsi = &pf->vsi;
3746 
3747 	struct i40e_hw_port_stats *nsd = &pf->stats;
3748 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3749 
3750 	/* Update hw stats */
3751 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3752 			   pf->stat_offsets_loaded,
3753 			   &osd->crc_errors, &nsd->crc_errors);
3754 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3755 			   pf->stat_offsets_loaded,
3756 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3757 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3758 			   I40E_GLPRT_GORCL(hw->port),
3759 			   pf->stat_offsets_loaded,
3760 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3761 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3762 			   I40E_GLPRT_GOTCL(hw->port),
3763 			   pf->stat_offsets_loaded,
3764 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3765 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3766 			   pf->stat_offsets_loaded,
3767 			   &osd->eth.rx_discards,
3768 			   &nsd->eth.rx_discards);
3769 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3770 			   I40E_GLPRT_UPRCL(hw->port),
3771 			   pf->stat_offsets_loaded,
3772 			   &osd->eth.rx_unicast,
3773 			   &nsd->eth.rx_unicast);
3774 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3775 			   I40E_GLPRT_UPTCL(hw->port),
3776 			   pf->stat_offsets_loaded,
3777 			   &osd->eth.tx_unicast,
3778 			   &nsd->eth.tx_unicast);
3779 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3780 			   I40E_GLPRT_MPRCL(hw->port),
3781 			   pf->stat_offsets_loaded,
3782 			   &osd->eth.rx_multicast,
3783 			   &nsd->eth.rx_multicast);
3784 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3785 			   I40E_GLPRT_MPTCL(hw->port),
3786 			   pf->stat_offsets_loaded,
3787 			   &osd->eth.tx_multicast,
3788 			   &nsd->eth.tx_multicast);
3789 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3790 			   I40E_GLPRT_BPRCL(hw->port),
3791 			   pf->stat_offsets_loaded,
3792 			   &osd->eth.rx_broadcast,
3793 			   &nsd->eth.rx_broadcast);
3794 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3795 			   I40E_GLPRT_BPTCL(hw->port),
3796 			   pf->stat_offsets_loaded,
3797 			   &osd->eth.tx_broadcast,
3798 			   &nsd->eth.tx_broadcast);
3799 
3800 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3801 			   pf->stat_offsets_loaded,
3802 			   &osd->tx_dropped_link_down,
3803 			   &nsd->tx_dropped_link_down);
3804 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3805 			   pf->stat_offsets_loaded,
3806 			   &osd->mac_local_faults,
3807 			   &nsd->mac_local_faults);
3808 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3809 			   pf->stat_offsets_loaded,
3810 			   &osd->mac_remote_faults,
3811 			   &nsd->mac_remote_faults);
3812 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3813 			   pf->stat_offsets_loaded,
3814 			   &osd->rx_length_errors,
3815 			   &nsd->rx_length_errors);
3816 
3817 	/* Flow control (LFC) stats */
3818 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3819 			   pf->stat_offsets_loaded,
3820 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3821 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3822 			   pf->stat_offsets_loaded,
3823 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3824 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3825 			   pf->stat_offsets_loaded,
3826 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3827 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3828 			   pf->stat_offsets_loaded,
3829 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3830 
3831 	/* Priority flow control stats */
3832 #if 0
3833 	for (int i = 0; i < 8; i++) {
3834 		ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3835 				   pf->stat_offsets_loaded,
3836 				   &osd->priority_xon_rx[i],
3837 				   &nsd->priority_xon_rx[i]);
3838 		ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3839 				   pf->stat_offsets_loaded,
3840 				   &osd->priority_xon_tx[i],
3841 				   &nsd->priority_xon_tx[i]);
3842 		ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3843 				   pf->stat_offsets_loaded,
3844 				   &osd->priority_xoff_tx[i],
3845 				   &nsd->priority_xoff_tx[i]);
3846 		ixl_stat_update32(hw,
3847 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3848 				   pf->stat_offsets_loaded,
3849 				   &osd->priority_xon_2_xoff[i],
3850 				   &nsd->priority_xon_2_xoff[i]);
3851 	}
3852 #endif
3853 
3854 	/* Packet size stats rx */
3855 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3856 			   I40E_GLPRT_PRC64L(hw->port),
3857 			   pf->stat_offsets_loaded,
3858 			   &osd->rx_size_64, &nsd->rx_size_64);
3859 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3860 			   I40E_GLPRT_PRC127L(hw->port),
3861 			   pf->stat_offsets_loaded,
3862 			   &osd->rx_size_127, &nsd->rx_size_127);
3863 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3864 			   I40E_GLPRT_PRC255L(hw->port),
3865 			   pf->stat_offsets_loaded,
3866 			   &osd->rx_size_255, &nsd->rx_size_255);
3867 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3868 			   I40E_GLPRT_PRC511L(hw->port),
3869 			   pf->stat_offsets_loaded,
3870 			   &osd->rx_size_511, &nsd->rx_size_511);
3871 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3872 			   I40E_GLPRT_PRC1023L(hw->port),
3873 			   pf->stat_offsets_loaded,
3874 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3875 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3876 			   I40E_GLPRT_PRC1522L(hw->port),
3877 			   pf->stat_offsets_loaded,
3878 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3879 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3880 			   I40E_GLPRT_PRC9522L(hw->port),
3881 			   pf->stat_offsets_loaded,
3882 			   &osd->rx_size_big, &nsd->rx_size_big);
3883 
3884 	/* Packet size stats tx */
3885 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3886 			   I40E_GLPRT_PTC64L(hw->port),
3887 			   pf->stat_offsets_loaded,
3888 			   &osd->tx_size_64, &nsd->tx_size_64);
3889 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3890 			   I40E_GLPRT_PTC127L(hw->port),
3891 			   pf->stat_offsets_loaded,
3892 			   &osd->tx_size_127, &nsd->tx_size_127);
3893 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3894 			   I40E_GLPRT_PTC255L(hw->port),
3895 			   pf->stat_offsets_loaded,
3896 			   &osd->tx_size_255, &nsd->tx_size_255);
3897 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3898 			   I40E_GLPRT_PTC511L(hw->port),
3899 			   pf->stat_offsets_loaded,
3900 			   &osd->tx_size_511, &nsd->tx_size_511);
3901 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3902 			   I40E_GLPRT_PTC1023L(hw->port),
3903 			   pf->stat_offsets_loaded,
3904 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3905 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3906 			   I40E_GLPRT_PTC1522L(hw->port),
3907 			   pf->stat_offsets_loaded,
3908 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3909 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3910 			   I40E_GLPRT_PTC9522L(hw->port),
3911 			   pf->stat_offsets_loaded,
3912 			   &osd->tx_size_big, &nsd->tx_size_big);
3913 
3914 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3915 			   pf->stat_offsets_loaded,
3916 			   &osd->rx_undersize, &nsd->rx_undersize);
3917 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3918 			   pf->stat_offsets_loaded,
3919 			   &osd->rx_fragments, &nsd->rx_fragments);
3920 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3921 			   pf->stat_offsets_loaded,
3922 			   &osd->rx_oversize, &nsd->rx_oversize);
3923 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3924 			   pf->stat_offsets_loaded,
3925 			   &osd->rx_jabber, &nsd->rx_jabber);
3926 	pf->stat_offsets_loaded = true;
3927 	/* End hw stats */
3928 
3929 	/* Update vsi stats */
3930 	ixl_update_eth_stats(vsi);
3931 
3932 	/* OS statistics */
3933 	/* TODO(ERJ): these counters are per-port; update all VSIs? */
3934 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
3935 }
3936 
3937 /*
3938 ** Tasklet handler for MSIX Adminq interrupts
3939 **  - run outside the interrupt handler since it may sleep
3940 */
3941 static void
3942 ixl_do_adminq(void *context, int pending)
3943 {
3944 	struct ixl_pf			*pf = context;
3945 	struct i40e_hw			*hw = &pf->hw;
3946 	struct ixl_vsi			*vsi = &pf->vsi;
3947 	struct i40e_arq_event_info	event;
3948 	i40e_status			ret;
3949 	u32				reg, loop = 0;
3950 	u16				opcode, result;
3951 
3952 	event.buf_len = IXL_AQ_BUF_SZ;
3953 	event.msg_buf = malloc(event.buf_len,
3954 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3955 	if (!event.msg_buf) {
3956 		printf("Unable to allocate adminq memory\n");
3957 		return;
3958 	}
3959 
3960 	/* clean and process any events */
3961 	do {
3962 		ret = i40e_clean_arq_element(hw, &event, &result);
3963 		if (ret)
3964 			break;
3965 		opcode = LE16_TO_CPU(event.desc.opcode);
3966 		switch (opcode) {
3967 		case i40e_aqc_opc_get_link_status:
3968 			vsi->link_up = ixl_config_link(hw);
3969 			ixl_update_link_status(pf);
3970 			break;
3971 		case i40e_aqc_opc_send_msg_to_pf:
3972 			/* process pf/vf communication here */
3973 			break;
3974 		case i40e_aqc_opc_event_lan_overflow:
3975 			break;
3976 		default:
3977 #ifdef IXL_DEBUG
3978 			printf("AdminQ unknown event %x\n", opcode);
3979 #endif
3980 			break;
3981 		}
3982 
3983 	} while (result && (loop++ < IXL_ADM_LIMIT));
3984 
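	/* Re-arm the admin queue interrupt cause in ICR0 */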
3985 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3986 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3987 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3988 	free(event.msg_buf, M_DEVBUF);
3989 
3990 	if (pf->msix > 1)
3991 		ixl_enable_adminq(&pf->hw);
3992 	else
3993 		ixl_enable_intr(vsi);
3994 }
3995 
3996 static int
3997 ixl_debug_info(SYSCTL_HANDLER_ARGS)
3998 {
3999 	struct ixl_pf	*pf;
4000 	int		error, input = 0;
4001 
4002 	error = sysctl_handle_int(oidp, &input, 0, req);
4003 
4004 	if (error || !req->newptr)
4005 		return (error);
4006 
4007 	if (input == 1) {
4008 		pf = (struct ixl_pf *)arg1;
4009 		ixl_print_debug_info(pf);
4010 	}
4011 
4012 	return (error);
4013 }
4014 
4015 static void
4016 ixl_print_debug_info(struct ixl_pf *pf)
4017 {
4018 	struct i40e_hw		*hw = &pf->hw;
4019 	struct ixl_vsi		*vsi = &pf->vsi;
4020 	struct ixl_queue	*que = vsi->queues;
4021 	struct rx_ring		*rxr = &que->rxr;
4022 	struct tx_ring		*txr = &que->txr;
4023 	u32			reg;
4024 
4025 
4026 	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4027 	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4028 	printf("RX next check = %x\n", rxr->next_check);
4029 	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4030 	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4031 	printf("TX desc avail = %x\n", txr->avail);
4032 
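	/* NB: 0xc below is a hard-coded VSI stat index used for debugging */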
4033 	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4034 	printf("RX Bytes = %x\n", reg);
4035 	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4036 	printf("Port RX Bytes = %x\n", reg);
4037 	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4038 	printf("RX discard = %x\n", reg);
4039 	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4040 	printf("Port RX discard = %x\n", reg);
4041 
4042 	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4043 	printf("TX errors = %x\n", reg);
4044 	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4045 	printf("TX Bytes = %x\n", reg);
4046 
4047 	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4048 	printf("RX undersize = %x\n", reg);
4049 	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4050 	printf("RX fragments = %x\n", reg);
4051 	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4052 	printf("RX oversize = %x\n", reg);
4053 	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4054 	printf("RX length error = %x\n", reg);
4055 	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4056 	printf("mac remote fault = %x\n", reg);
4057 	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4058 	printf("mac local fault = %x\n", reg);
4059 }
4060 
4061 /**
4062  * Update VSI-specific ethernet statistics counters.
4063  **/
4064 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4065 {
4066 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4067 	struct i40e_hw *hw = &pf->hw;
4068 	struct i40e_eth_stats *es;
4069 	struct i40e_eth_stats *oes;
4070 	int i;
4071 	uint64_t tx_discards;
4072 	struct i40e_hw_port_stats *nsd;
4073 	u16 stat_idx = vsi->info.stat_counter_idx;
4074 
4075 	es = &vsi->eth_stats;
4076 	oes = &vsi->eth_stats_offsets;
4077 	nsd = &pf->stats;
4078 
4079 	/* Gather up the stats that the hw collects */
4080 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4081 			   vsi->stat_offsets_loaded,
4082 			   &oes->tx_errors, &es->tx_errors);
4083 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4084 			   vsi->stat_offsets_loaded,
4085 			   &oes->rx_discards, &es->rx_discards);
4086 
4087 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4088 			   I40E_GLV_GORCL(stat_idx),
4089 			   vsi->stat_offsets_loaded,
4090 			   &oes->rx_bytes, &es->rx_bytes);
4091 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4092 			   I40E_GLV_UPRCL(stat_idx),
4093 			   vsi->stat_offsets_loaded,
4094 			   &oes->rx_unicast, &es->rx_unicast);
4095 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4096 			   I40E_GLV_MPRCL(stat_idx),
4097 			   vsi->stat_offsets_loaded,
4098 			   &oes->rx_multicast, &es->rx_multicast);
4099 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4100 			   I40E_GLV_BPRCL(stat_idx),
4101 			   vsi->stat_offsets_loaded,
4102 			   &oes->rx_broadcast, &es->rx_broadcast);
4103 
4104 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4105 			   I40E_GLV_GOTCL(stat_idx),
4106 			   vsi->stat_offsets_loaded,
4107 			   &oes->tx_bytes, &es->tx_bytes);
4108 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4109 			   I40E_GLV_UPTCL(stat_idx),
4110 			   vsi->stat_offsets_loaded,
4111 			   &oes->tx_unicast, &es->tx_unicast);
4112 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4113 			   I40E_GLV_MPTCL(stat_idx),
4114 			   vsi->stat_offsets_loaded,
4115 			   &oes->tx_multicast, &es->tx_multicast);
4116 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4117 			   I40E_GLV_BPTCL(stat_idx),
4118 			   vsi->stat_offsets_loaded,
4119 			   &oes->tx_broadcast, &es->tx_broadcast);
4120 	vsi->stat_offsets_loaded = true;
4121 
4122 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4123 	for (i = 0; i < vsi->num_queues; i++)
4124 		tx_discards += vsi->queues[i].txr.br->br_drops;
4125 
4126 	/* Update ifnet stats */
4127 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4128 	                   es->rx_multicast +
4129 			   es->rx_broadcast);
4130 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4131 	                   es->tx_multicast +
4132 			   es->tx_broadcast);
4133 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4134 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4135 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4136 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4137 
4138 	IXL_SET_OERRORS(vsi, es->tx_errors);
4139 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4140 	IXL_SET_OQDROPS(vsi, tx_discards);
4141 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4142 	IXL_SET_COLLISIONS(vsi, 0);
4143 }
4144 
4145 /**
4146  * Reset all of the stats for the given pf
4147  **/
4148 void ixl_pf_reset_stats(struct ixl_pf *pf)
4149 {
4150 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4151 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4152 	pf->stat_offsets_loaded = false;
4153 }
4154 
4155 /**
4156  * Resets all stats of the given vsi
4157  **/
4158 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4159 {
4160 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4161 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4162 	vsi->stat_offsets_loaded = false;
4163 }
4164 
4165 /**
4166  * Read and update a 48 bit stat from the hw
4167  *
4168  * Since the device stats are not reset at PFReset, they likely will not
4169  * be zeroed when the driver starts.  We'll save the first values read
4170  * and use them as offsets to be subtracted from the raw values in order
4171  * to report stats that count from zero.
4172  **/
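/*
** Rollover sketch with made-up values: if *offset is
** 0xFFFFFFFFFFF0 and a wrapped read returns new_data = 0x10, the
** reported stat is (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, masked
** back down to 48 bits.
*/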
4173 static void
4174 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4175 	bool offset_loaded, u64 *offset, u64 *stat)
4176 {
4177 	u64 new_data;
4178 
4179 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4180 	new_data = rd64(hw, loreg);
4181 #else
4182 	/*
4183 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4184 	 * 10 don't support 8 byte bus reads/writes.
4185 	 */
4186 	new_data = rd32(hw, loreg);
4187 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4188 #endif
4189 
4190 	if (!offset_loaded)
4191 		*offset = new_data;
4192 	if (new_data >= *offset)
4193 		*stat = new_data - *offset;
4194 	else
4195 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4196 	*stat &= 0xFFFFFFFFFFFFULL;
4197 }
4198 
4199 /**
4200  * Read and update a 32 bit stat from the hw
4201  **/
4202 static void
4203 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4204 	bool offset_loaded, u64 *offset, u64 *stat)
4205 {
4206 	u32 new_data;
4207 
4208 	new_data = rd32(hw, reg);
4209 	if (!offset_loaded)
4210 		*offset = new_data;
4211 	if (new_data >= *offset)
4212 		*stat = (u32)(new_data - *offset);
4213 	else
4214 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4215 }
4216 
4217 /*
4218 ** Set flow control using sysctl:
4219 ** 	0 - off
4220 **	1 - rx pause
4221 **	2 - tx pause
4222 **	3 - full
4223 */
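/*
** Example (assuming this handler is attached as
** dev.ixl.<unit>.fc):
**
**	# sysctl dev.ixl.0.fc=3		# request full flow control
*/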
4224 static int
4225 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4226 {
4227 	/*
4228 	 * TODO: ensure flow control is disabled if
4229 	 * priority flow control is enabled
4230 	 *
4231 	 * TODO: ensure tx CRC by hardware should be enabled
4232 	 * if tx flow control is enabled.
4233 	 */
4234 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4235 	struct i40e_hw *hw = &pf->hw;
4236 	device_t dev = pf->dev;
4237 	int requested_fc = 0, error = 0;
4238 	enum i40e_status_code aq_error = 0;
4239 	u8 fc_aq_err = 0;
4240 
4241 	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4242 	if (aq_error) {
4243 		device_printf(dev,
4244 		    "%s: Error retrieving link info from aq, %d\n",
4245 		    __func__, aq_error);
4246 		return (EAGAIN);
4247 	}
4248 
4249 	/* Read in new mode */
4250 	requested_fc = hw->fc.current_mode;
4251 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4252 	if ((error) || (req->newptr == NULL))
4253 		return (error);
4254 	if (requested_fc < 0 || requested_fc > 3) {
4255 		device_printf(dev,
4256 		    "Invalid fc mode; valid modes are 0 through 3\n");
4257 		return (EINVAL);
4258 	}
4259 
4260 	/*
4261 	** Changing flow control mode currently does not work on
4262 	** 40GBASE-CR4 PHYs
4263 	*/
4264 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4265 	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4266 		device_printf(dev, "Changing flow control mode unsupported"
4267 		    " on 40GBase-CR4 media.\n");
4268 		return (ENODEV);
4269 	}
4270 
4271 	/* Set fc ability for port */
4272 	hw->fc.requested_mode = requested_fc;
4273 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4274 	if (aq_error) {
4275 		device_printf(dev,
4276 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4277 		    __func__, aq_error, fc_aq_err);
4278 		return (EAGAIN);
4279 	}
4280 
4281 	if (hw->fc.current_mode != hw->fc.requested_mode) {
4282 		device_printf(dev, "%s: FC set failure:\n", __func__);
4283 		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4284 		    __func__,
4285 		    ixl_fc_string[hw->fc.current_mode],
4286 		    ixl_fc_string[hw->fc.requested_mode]);
4287 	}
4288 
4289 	return (0);
4290 }
4291 
4292 static int
4293 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4294 {
4295 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4296 	struct i40e_hw *hw = &pf->hw;
4297 	int error = 0, index = 0;
4298 
4299 	char *speeds[] = {
4300 		"Unknown",
4301 		"100M",
4302 		"1G",
4303 		"10G",
4304 		"40G",
4305 		"20G"
4306 	};
4307 
4308 	ixl_update_link_status(pf);
4309 
4310 	switch (hw->phy.link_info.link_speed) {
4311 	case I40E_LINK_SPEED_100MB:
4312 		index = 1;
4313 		break;
4314 	case I40E_LINK_SPEED_1GB:
4315 		index = 2;
4316 		break;
4317 	case I40E_LINK_SPEED_10GB:
4318 		index = 3;
4319 		break;
4320 	case I40E_LINK_SPEED_40GB:
4321 		index = 4;
4322 		break;
4323 	case I40E_LINK_SPEED_20GB:
4324 		index = 5;
4325 		break;
4326 	case I40E_LINK_SPEED_UNKNOWN:
4327 	default:
4328 		index = 0;
4329 		break;
4330 	}
4331 
4332 	error = sysctl_handle_string(oidp, speeds[index],
4333 	    strlen(speeds[index]), req);
4334 	return (error);
4335 }
4336 
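/*
** Apply a new set of advertised link speeds; `speeds' is the
** bitmask described on the sysctl handler below (0x1 = 100 Mb,
** 0x2 = 1 Gb, 0x4 = 10 Gb).
*/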
4337 static int
4338 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4339 {
4340 	struct i40e_hw *hw = &pf->hw;
4341 	device_t dev = pf->dev;
4342 	struct i40e_aq_get_phy_abilities_resp abilities;
4343 	struct i40e_aq_set_phy_config config;
4344 	enum i40e_status_code aq_error = 0;
4345 
4346 	/* Get current capability information */
4347 	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4348 	if (aq_error) {
4349 		device_printf(dev, "%s: Error getting phy capabilities %d,"
4350 		    " aq error: %d\n", __func__, aq_error,
4351 		    hw->aq.asq_last_status);
4352 		return (EAGAIN);
4353 	}
4354 
4355 	/* Prepare new config */
4356 	bzero(&config, sizeof(config));
4357 	config.phy_type = abilities.phy_type;
4358 	config.abilities = abilities.abilities
4359 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4360 	config.eee_capability = abilities.eee_capability;
4361 	config.eeer = abilities.eeer_val;
4362 	config.low_power_ctrl = abilities.d3_lpan;
4363 	/* Translate into aq cmd link_speed */
4364 	if (speeds & 0x4)
4365 		config.link_speed |= I40E_LINK_SPEED_10GB;
4366 	if (speeds & 0x2)
4367 		config.link_speed |= I40E_LINK_SPEED_1GB;
4368 	if (speeds & 0x1)
4369 		config.link_speed |= I40E_LINK_SPEED_100MB;
4370 
4371 	/* Do aq command & restart link */
4372 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4373 	if (aq_error) {
4374 		device_printf(dev, "%s: Error setting new phy config %d,"
4375 		    " aq error: %d\n", __func__, aq_error,
4376 		    hw->aq.asq_last_status);
4377 		return (EAGAIN);
4378 	}
4379 
4380 	return (0);
4381 }
4382 
4383 /*
4384 ** Control link advertise speed:
4385 **	Flags:
4386 **	0x1 - advertise 100 Mb
4387 **	0x2 - advertise 1G
4388 **	0x4 - advertise 10G
4389 **
4390 ** Does not work on 40G devices.
4391 */
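/*
** Example (assuming this handler is attached as
** dev.ixl.<unit>.advertise_speed):
**
**	# sysctl dev.ixl.0.advertise_speed=0x6	# advertise 1G + 10G
*/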
4392 static int
4393 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4394 {
4395 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4396 	struct i40e_hw *hw = &pf->hw;
4397 	device_t dev = pf->dev;
4398 	int requested_ls = 0;
4399 	int error = 0;
4400 
4401 	/*
4402 	** FW doesn't support changing advertised speed
4403 	** for 40G devices; speed is always 40G.
4404 	*/
4405 	if (i40e_is_40G_device(hw->device_id))
4406 		return (ENODEV);
4407 
4408 	/* Read in new mode */
4409 	requested_ls = pf->advertised_speed;
4410 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4411 	if ((error) || (req->newptr == NULL))
4412 		return (error);
4413 	if (requested_ls < 1 || requested_ls > 7) {
4414 		device_printf(dev,
4415 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4416 		return (EINVAL);
4417 	}
4418 
4419 	/* Exit if no change */
4420 	if (pf->advertised_speed == requested_ls)
4421 		return (0);
4422 
4423 	error = ixl_set_advertised_speeds(pf, requested_ls);
4424 	if (error)
4425 		return (error);
4426 
4427 	pf->advertised_speed = requested_ls;
4428 	ixl_update_link_status(pf);
4429 	return (0);
4430 }
4431 
4432 /*
4433 ** Get the width and transaction speed of
4434 ** the bus this adapter is plugged into.
4435 */
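/*
** Per the PCIe spec, the Link Status register encodes the
** negotiated link speed in its low bits and the negotiated link
** width just above them; the I40E_PCI_LINK_* masks below extract
** those fields.
*/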
4436 static u16
4437 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4438 {
4439         u16                     link;
4440         u32                     offset;
4441 
4442 
4443         /* Get the PCI Express Capabilities offset */
4444         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4445 
4446         /* ...and read the Link Status Register */
4447         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4448 
4449         switch (link & I40E_PCI_LINK_WIDTH) {
4450         case I40E_PCI_LINK_WIDTH_1:
4451                 hw->bus.width = i40e_bus_width_pcie_x1;
4452                 break;
4453         case I40E_PCI_LINK_WIDTH_2:
4454                 hw->bus.width = i40e_bus_width_pcie_x2;
4455                 break;
4456         case I40E_PCI_LINK_WIDTH_4:
4457                 hw->bus.width = i40e_bus_width_pcie_x4;
4458                 break;
4459         case I40E_PCI_LINK_WIDTH_8:
4460                 hw->bus.width = i40e_bus_width_pcie_x8;
4461                 break;
4462         default:
4463                 hw->bus.width = i40e_bus_width_unknown;
4464                 break;
4465         }
4466 
4467         switch (link & I40E_PCI_LINK_SPEED) {
4468         case I40E_PCI_LINK_SPEED_2500:
4469                 hw->bus.speed = i40e_bus_speed_2500;
4470                 break;
4471         case I40E_PCI_LINK_SPEED_5000:
4472                 hw->bus.speed = i40e_bus_speed_5000;
4473                 break;
4474         case I40E_PCI_LINK_SPEED_8000:
4475                 hw->bus.speed = i40e_bus_speed_8000;
4476                 break;
4477         default:
4478                 hw->bus.speed = i40e_bus_speed_unknown;
4479                 break;
4480         }
4481 
4482 
4483         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4484             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4485             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4486             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4487             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4488             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4489             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4490             ("Unknown"));
4491 
4492         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4493             (hw->bus.speed < i40e_bus_speed_8000)) {
4494                 device_printf(dev, "PCI-Express bandwidth available"
4495                     " for this device\n     is not sufficient for"
4496                     " normal operation.\n");
4497                 device_printf(dev, "For expected performance a x8 "
4498                     "PCIE Gen3 slot is required.\n");
4499         }
4500 
4501         return (link);
4502 }
4503 
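/*
** Report firmware, admin queue API, and NVM versions as a single
** string, e.g. "f4.22 a1.1 n04.24 e800013fd" (values here are
** illustrative only).
*/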
4504 static int
4505 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4506 {
4507 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4508 	struct i40e_hw	*hw = &pf->hw;
4509 	char		buf[32];
4510 
4511 	snprintf(buf, sizeof(buf),
4512 	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4513 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4514 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4515 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4516 	    IXL_NVM_VERSION_HI_SHIFT,
4517 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4518 	    IXL_NVM_VERSION_LO_SHIFT,
4519 	    hw->nvm.eetrack);
4520 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4521 }
4522 
4523 
4524 #ifdef IXL_DEBUG
4525 static int
4526 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4527 {
4528 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4529 	struct i40e_hw *hw = &pf->hw;
4530 	struct i40e_link_status link_status;
4531 	char buf[512];
4532 
4533 	enum i40e_status_code aq_error = 0;
4534 
4535 	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4536 	if (aq_error) {
4537 		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4538 		return (EPERM);
4539 	}
4540 
4541 	snprintf(buf, sizeof(buf), "\n"
4542 	    "PHY Type : %#04x\n"
4543 	    "Speed    : %#04x\n"
4544 	    "Link info: %#04x\n"
4545 	    "AN info  : %#04x\n"
4546 	    "Ext info : %#04x",
4547 	    link_status.phy_type, link_status.link_speed,
4548 	    link_status.link_info, link_status.an_info,
4549 	    link_status.ext_info);
4550 
4551 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4552 }
4553 
4554 static int
4555 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4556 {
4557 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4558 	struct i40e_hw *hw = &pf->hw;
4559 	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4560 	char buf[512];
4561 
4562 	enum i40e_status_code aq_error = 0;
4563 
4564 	// TODO: Print out list of qualified modules as well?
4565 	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4566 	if (aq_error) {
4567 		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4568 		return (EPERM);
4569 	}
4570 
4571 	snprintf(buf, sizeof(buf), "\n"
4572 	    "PHY Type : %#010x\n"
4573 	    "Speed    : %#04x\n"
4574 	    "Abilities: %#04x\n"
4575 	    "EEE cap  : %#06x\n"
4576 	    "EEER reg : %#010x\n"
4577 	    "D3 Lpan  : %#04x",
4578 	    abilities_resp.phy_type, abilities_resp.link_speed,
4579 	    abilities_resp.abilities, abilities_resp.eee_capability,
4580 	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4581 
4582 	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4583 }
4584 
4585 static int
4586 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4587 {
4588 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4589 	struct ixl_vsi *vsi = &pf->vsi;
4590 	struct ixl_mac_filter *f;
4591 	char *buf, *buf_i;
4592 
4593 	int error = 0;
4594 	int ftl_len = 0;
4595 	int ftl_counter = 0;
4596 	int buf_len = 0;
4597 	int entry_len = 42;
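	/* 42 = 17 (MAC string) + 11 (", vlan %4d") + 14 (", flags %#06x") */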
4598 
4599 	SLIST_FOREACH(f, &vsi->ftl, next) {
4600 		ftl_len++;
4601 	}
4602 
4603 	if (ftl_len < 1) {
4604 		sysctl_handle_string(oidp, "(none)", 6, req);
4605 		return (0);
4606 	}
4607 
4608 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4609 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
4610 
4611 	sprintf(buf_i++, "\n");
4612 	SLIST_FOREACH(f, &vsi->ftl, next) {
4613 		sprintf(buf_i,
4614 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4615 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4616 		buf_i += entry_len;
4617 		/* don't print '\n' for last entry */
4618 		if (++ftl_counter != ftl_len) {
4619 			sprintf(buf_i, "\n");
4620 			buf_i++;
4621 		}
4622 	}
4623 
4624 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4625 	if (error)
4626 		printf("sysctl error: %d\n", error);
4627 	free(buf, M_DEVBUF);
4628 	return error;
4629 }
4630 
4631 #define IXL_SW_RES_SIZE 0x14
4632 static int
4633 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4634 {
4635 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4636 	struct i40e_hw *hw = &pf->hw;
4637 	device_t dev = pf->dev;
4638 	struct sbuf *buf;
4639 	int error = 0;
4640 
4641 	u8 num_entries;
4642 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4643 
4644 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4645 	if (!buf) {
4646 		device_printf(dev, "Could not allocate sbuf for output.\n");
4647 		return (ENOMEM);
4648 	}
4649 
4650 	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4651 				resp,
4652 				IXL_SW_RES_SIZE,
4653 				NULL);
4654 	if (error) {
4655 		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4656 		    __func__, error, hw->aq.asq_last_status);
4657 		sbuf_delete(buf);
4658 		return error;
4659 	}
4660 	device_printf(dev, "Num_entries: %d\n", num_entries);
4661 
4662 	sbuf_cat(buf, "\n");
4663 	sbuf_printf(buf,
4664 	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4665 	    "     | (this)     | (all) | (this) | (all)       \n");
4666 	for (int i = 0; i < num_entries; i++) {
4667 		sbuf_printf(buf,
4668 		    "%#4x | %10d   %5d   %6d   %12d",
4669 		    resp[i].resource_type,
4670 		    resp[i].guaranteed,
4671 		    resp[i].total,
4672 		    resp[i].used,
4673 		    resp[i].total_unalloced);
4674 		if (i < num_entries - 1)
4675 			sbuf_cat(buf, "\n");
4676 	}
4677 
4678 	error = sbuf_finish(buf);
4679 	if (error) {
4680 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4681 		sbuf_delete(buf);
4682 		return error;
4683 	}
4684 
4685 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4686 	if (error)
4687 		device_printf(dev, "sysctl error: %d\n", error);
4688 	sbuf_delete(buf);
4689 	return error;
4690 }
4691 
4692 /*
4693 ** Caller must init and delete sbuf; this function will clear and
4694 ** finish it for caller.
4695 */
4696 static char *
4697 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4698 {
4699 	sbuf_clear(s);
4700 
4701 	if (seid == 0 && uplink)
4702 		sbuf_cat(s, "Network");
4703 	else if (seid == 0)
4704 		sbuf_cat(s, "Host");
4705 	else if (seid == 1)
4706 		sbuf_cat(s, "EMP");
4707 	else if (seid <= 5)
4708 		sbuf_printf(s, "MAC %d", seid - 2);
4709 	else if (seid <= 15)
4710 		sbuf_cat(s, "Reserved");
4711 	else if (seid <= 31)
4712 		sbuf_printf(s, "PF %d", seid - 16);
4713 	else if (seid <= 159)
4714 		sbuf_printf(s, "VF %d", seid - 32);
4715 	else if (seid <= 287)
4716 		sbuf_cat(s, "Reserved");
4717 	else if (seid <= 511)
4718 		sbuf_cat(s, "Other"); // for other structures
4719 	else if (seid <= 895)
4720 		sbuf_printf(s, "VSI %d", seid - 512);
4721 	else if (seid <= 1023)
4722 		sbuf_printf(s, "Reserved");
4723 	else
4724 		sbuf_cat(s, "Invalid");
4725 
4726 	sbuf_finish(s);
4727 	return sbuf_data(s);
4728 }
4729 
4730 static int
4731 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4732 {
4733 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4734 	struct i40e_hw *hw = &pf->hw;
4735 	device_t dev = pf->dev;
4736 	struct sbuf *buf;
4737 	struct sbuf *nmbuf;
4738 	int error = 0;
4739 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4740 
4741 	u16 next = 0;
4742 	struct i40e_aqc_get_switch_config_resp *sw_config;
4743 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4744 
4745 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4746 	if (!buf) {
4747 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4748 		return (ENOMEM);
4749 	}
4750 
4751 	error = i40e_aq_get_switch_config(hw, sw_config,
4752 	    sizeof(aq_buf), &next, NULL);
4753 	if (error) {
4754 		device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4755 		    __func__, error, hw->aq.asq_last_status);
4756 		sbuf_delete(buf);
4757 		return error;
4758 	}
4759 
4760 	nmbuf = sbuf_new_auto();
4761 	if (!nmbuf) {
4762 		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
4763 		return (ENOMEM);
4764 	}
4765 
4766 	sbuf_cat(buf, "\n");
4767 	// Assuming <= 255 elements in switch
4768 	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4769 	/* Exclude:
4770 	** Revision -- all elements are revision 1 for now
4771 	*/
4772 	sbuf_printf(buf,
4773 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4774 	    "                |          |          | (uplink)\n");
4775 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4776 		// "%4d (%8s) | %8s   %8s   %#8x",
4777 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4778 		sbuf_cat(buf, " ");
4779 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4780 		sbuf_cat(buf, " | ");
4781 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4782 		sbuf_cat(buf, "   ");
4783 		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4784 		sbuf_cat(buf, "   ");
4785 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4786 		if (i < sw_config->header.num_reported - 1)
4787 			sbuf_cat(buf, "\n");
4788 	}
4789 	sbuf_delete(nmbuf);
4790 
4791 	error = sbuf_finish(buf);
4792 	if (error) {
4793 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4794 		sbuf_delete(buf);
4795 		return error;
4796 	}
4797 
4798 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4799 	if (error)
4800 		device_printf(dev, "sysctl error: %d\n", error);
4801 	sbuf_delete(buf);
4802 
4803 	return (error);
4804 }
4805 
4806 /*
4807 ** Dump TX desc given index.
4808 ** Doesn't work; don't use.
4809 ** TODO: Also needs a queue index input!
4810 **/
4811 static int
4812 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4813 {
4814 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4815 	device_t dev = pf->dev;
4816 	struct sbuf *buf;
4817 	int error = 0;
4818 
4819 	int desc_idx = 0;	/* sysctl_handle_int() needs a full int */
4820 
4821 	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4822 	if (!buf) {
4823 		device_printf(dev, "Could not allocate sbuf for output.\n");
4824 		return (ENOMEM);
4825 	}
4826 
4827 	/* Read in index */
4828 	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4829 	if (error)
4830 		return (error);
4831 	if (req->newptr == NULL)
4832 		return (EIO); // fix
4833 	if (desc_idx > 1024) { // fix
4834 		device_printf(dev,
4835 		    "Invalid descriptor index, needs to be < 1024\n"); // fix
4836 		return (EINVAL);
4837 	}
4838 
4839 	// Don't use this sysctl yet
4840 	if (TRUE)
4841 		return (ENODEV);
4842 
4843 	sbuf_cat(buf, "\n");
4844 
4845 	// set to queue 1?
4846 	struct ixl_queue *que = pf->vsi.queues;
4847 	struct tx_ring *txr = &(que[1].txr);
4848 	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4849 
4850 	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4851 	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4852 	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4853 
4854 	error = sbuf_finish(buf);
4855 	if (error) {
4856 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4857 		sbuf_delete(buf);
4858 		return error;
4859 	}
4860 
4861 	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4862 	if (error)
4863 		device_printf(dev, "sysctl error: %d\n", error);
4864 	sbuf_delete(buf);
4865 	return error;
4866 }
4867 #endif
4868 
4869