/* xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision 094fc1ed0f2627525c7b0342efcbad5be7a8546a) */
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

static int	ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
static u64	ixl_max_aq_speed_to_value(u8);
static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);

/* Sysctls */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
	va_list args;

	if (!(mask & pf->dbg_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
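
/*
 * Usage sketch (illustrative only; IXL_DBG_INFO is an assumed mask bit,
 * check the real enum ixl_dbg_mask values in ixl_pf.h):
 *
 *	ixl_debug_core(pf, IXL_DBG_INFO, "%s: VSI seid %d\n",
 *	    __func__, vsi->seid);
 *
 * This prints with the usual "ixl0: " device prefix, but only when the
 * corresponding bit is set in pf->dbg_mask; otherwise the call returns
 * before formatting anything.
 */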

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}
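
/*
 * For reference, the string built above looks like the following
 * (values invented for illustration):
 *
 *	fw 5.0.40043 api 1.5 nvm 5.04 etid 80002537 oem 1.261.0
 */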

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
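
/*
 * Units note (derived from the register comment in
 * ixl_configure_intr0_msix() below): the ITR registers count in 2 usec
 * units, so e.g. an ITR setting of 0x3E (62) spaces interrupts at least
 * 124 usec apart, roughly 8000 interrupts per second per queue under
 * sustained load.
 */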


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: by the network stack, as the
 *  init entry point in the network interface structure, and by the
 *  driver, as a hw/sw initialization routine to get the hardware
 *  and software into a consistent state.
 *
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	INIT_DEBUGOUT("ixl_init_locked: begin");
	IXL_PF_LOCK_ASSERT(pf);

	ixl_stop_locked(pf);

	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LAA address change failed!\n");
			return;
		}
	}

	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!\n");
		return;
	}

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */
	if (pf->msix > 1) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Get link info */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
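
/*
 * Bring-up order recap for ixl_init_locked() above: stop the interface,
 * revive the admin queue if it died, refresh the MAC filter (handling a
 * possible LAA), program offloads and filter control, initialize the
 * VSI rings/HMC contexts, RSS, VLAN filters and interrupt routing, then
 * enable rings and interrupts, fetch link state, start the timer and
 * mark the ifnet running.
 */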


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	int		error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
	osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (osdep->i2c_intfc_num != -1)
		pf->has_i2c = true;

	return (error);
}

void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
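
/*
 * Summary of the TXCSUM/TSO4 coupling enforced above (the TXCSUM_IPV6/
 * TSO6 branch applies the same rules):
 *
 *	current state	toggled		result
 *	-------------	-------		------
 *	neither on	TXCSUM		TXCSUM on; TSO4 restored if "kept"
 *	neither on	TSO4		both on (TSO4 requires TXCSUM)
 *	TXCSUM only	TXCSUM		TXCSUM off
 *	TXCSUM only	TSO4		TSO4 on
 *	both on		TXCSUM		both off; TSO4 remembered via
 *					IXL_FLAGS_KEEP_TSO4
 *	both on		TSO4		TSO4 off
 */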

/* For the set_advertise sysctl */
void
ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;

	/* Set initial sysctl values */
	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
	    NULL);
	if (status) {
		/* Non-fatal error */
		device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
		    __func__, status);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %d\n", status);
			goto err_out;
		}
	}

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's setup when we get here.

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u8 set_fc_err_mask;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		    error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

err_out:
	return (error);
}

/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	struct ifnet	*ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			icr0;
	bool			more_tx, more_rx;

	pf->admin_irq++;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
	}

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		++que->irqs;

		more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

		IXL_TX_LOCK(txr);
		more_tx = ixl_txeof(que);
		if (!drbr_empty(vsi->ifp, txr->br))
			more_tx = 1;
		IXL_TX_UNLOCK(txr);
	}

	ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(dev, "ECC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(dev, "PCI Exception detected!\n");
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	if (do_task)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_intr0(hw);
}

void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, TRUE);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/

void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	struct tx_ring		*txr;
	int			hung = 0;
	u32			mask;
	s32			timer, new_timer;

	IXL_PF_LOCK_ASSERT(pf);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/* Check status of the queues */
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
		I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		txr = &que->txr;
		timer = atomic_load_acq_32(&txr->watchdog_timer);
		if (timer > 0) {
			new_timer = timer - hz;
			if (new_timer <= 0) {
				atomic_store_rel_32(&txr->watchdog_timer, -1);
				device_printf(dev, "WARNING: queue %d "
				    "appears to be hung!\n", que->me);
				++hung;
			} else {
				/*
				 * If this fails, that means something in the TX
				 * path has updated the watchdog, so it means the
				 * TX path is still working and the watchdog
				 * doesn't need to count down.
				 */
				atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
				/* Any queues with outstanding work get a sw irq */
				wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
			}
		}
	}
	/* Reset when a queue shows hung */
	if (hung)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "WARNING: Resetting!\n");
	pf->watchdog_events++;
	ixl_init_locked(pf);
}
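
/*
 * Watchdog arithmetic sketch: the TX path arms txr->watchdog_timer with
 * a positive tick count (the exact initial value lives in the TX code
 * and is assumed here, e.g. a few multiples of hz). Each timer run
 * above subtracts hz; a queue that makes no progress for that many
 * ticks crosses zero and triggers the reinit, while any TX-path update
 * in the meantime makes the atomic_cmpset fail, so the countdown simply
 * restarts from the freshly-armed value.
 */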

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
		"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
		"Clause 108 RS-FEC" : "None",
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ifnet		*ifp = vsi->ifp;
	device_t		dev = pf->dev;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
			if_link_state_change(ifp, LINK_STATE_UP);
			ixl_link_up_msg(pf);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine stops all traffic on the adapter: it stops the local
 *  timer, disables the rings and their interrupts, and marks the
 *  interface as no longer running.
 *
 **********************************************************************/

void
ixl_stop_locked(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	/* Stop the local timer */
	callout_stop(&pf->timer);

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}

void
ixl_stop(struct ixl_pf *pf)
{
	IXL_PF_LOCK(pf);
	ixl_stop_locked(pf);
	IXL_PF_UNLOCK(pf);
}

/*********************************************************************
 *
 *  Setup legacy/MSI interrupt resources and the handler for them.
 *
 **********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	int		error, rid = 0;

	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "bus_alloc_resource_any() for"
		    " legacy/msi interrupt failed\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "bus_setup_intr() for legacy/msi"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
	if (error) {
		/* non-fatal */
		device_printf(dev, "bus_describe_intr() for legacy/msi"
		    " interrupt name failed, error %d\n", error);
	}

	return (0);
}

int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int error = 0;

	/* Tasklet for Admin Queue interrupts */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
	/* VFLR Tasklet */
	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
	/* Create and start Admin Queue taskqueue */
	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	if (!pf->tq) {
		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
		return (ENOMEM);
	}
	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
		    error);
		taskqueue_free(pf->tq);
		return (error);
	}
	return (0);
}

int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;
#ifdef RSS
	int		cpu_id = 0;
	cpuset_t	cpu_mask;
#endif

	/* Create queue tasks and start queue taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
	}

	return (0);
}

void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
	if (pf->tq) {
		taskqueue_free(pf->tq);
		pf->tq = NULL;
	}
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_free(que->tq);
			que->tq = NULL;
		}
	}
}

int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, error = 0;

	/* Admin IRQ rid is 1, vector is 0 */
	rid = 1;
	/* Get interrupt resource from bus */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!pf->res) {
		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
		    " interrupt failed [rid=%d]\n", rid);
		return (ENXIO);
	}
	/* Then associate interrupt with handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "bus_setup_intr() for Admin Queue"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
	if (error) {
		/* non-fatal */
		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
	}
	pf->admvec = 0;

	return (0);
}

/*
 * Allocate interrupt resources from bus and associate an interrupt handler
 * to those for the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
	device_t	dev = vsi->dev;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring	*txr;
	int		error, rid, vector = 1;

	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (!que->res) {
			device_printf(dev, "bus_alloc_resource_any() for"
			    " Queue %d interrupt failed [rid=%d]\n",
			    que->me, rid);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
		if (error) {
			device_printf(dev, "bus_setup_intr() for Queue %d"
			    " interrupt handler failed, error %d\n",
			    que->me, error);
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			return (error);
		}
		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		if (error) {
			device_printf(dev, "bus_describe_intr() for Queue %d"
			    " interrupt name failed, error %d\n",
			    que->me, error);
		}
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		error = bus_bind_intr(dev, que->res, cpu_id);
		if (error) {
			device_printf(dev, "bus_bind_intr() for Queue %d"
			    " to CPU %d failed, error %d\n",
			    que->me, cpu_id, error);
		}
		que->msix = vector;
	}

	return (0);
}

/*
 * When used in a virtualized environment, the PCI BUSMASTER capability may
 * not be set, so explicitly set it here (and rewrite the ENABLE bit in the
 * MSIX control register in the routine below) to let the host successfully
 * initialize us.
 */
void
ixl_set_busmaster(device_t dev)
{
	u16 pci_cmd_word;

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}

/*
 * Rewrite the ENABLE bit in the MSIX control register
 * to cause the host to successfully initialize us.
 */
void
ixl_set_msix_enable(device_t dev)
{
	int msix_ctrl, rid;

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
}

/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	int auto_max_queues;
	int rid, want, vectors, queues, available;
#ifdef IXL_IW
	int iw_want, iw_vectors;

	pf->iw_msix = 0;
#endif

	/* Override by tunable */
	if (!pf->enable_msix)
		goto no_msix;

	/* Ensure proper operation in virtualized environment */
	ixl_set_busmaster(dev);

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");
		goto no_msix;
	}

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto no_msix;
	}

	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);
	else
		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		pf->msix_mem = NULL;
		goto no_msix; /* Will go to Legacy setup */
	}

#ifdef IXL_IW
	if (ixl_enable_iwarp) {
		/* iWARP wants additional vector for CQP */
		iw_want = mp_ncpus + 1;
		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
			    iw_want : available;
			vectors += iw_vectors;
		} else
			iw_vectors = 0;
	}
#endif

	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
#ifdef IXL_IW
		if (ixl_enable_iwarp)
			pf->iw_msix = iw_vectors;
#endif

		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
no_msix:
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->max_queues = 1;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");
	else {
		vectors = 0;
		device_printf(pf->dev, "Using a Legacy interrupt\n");
	}
	return (vectors);
}
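
/*
 * Vector budgeting example for ixl_init_msix() (invented numbers): on a
 * 16-core box with 32 MSI-X vectors available and no tunables set,
 * auto_max_queues = min(16, 31) = 16, queues = min(16, 8) = 8, and
 * want = 9 (8 queue pairs + 1 admin vector). With IXL_IW enabled, up to
 * mp_ncpus + 1 further vectors are granted to iWARP out of the
 * remaining 23.
 */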

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSIX mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* First queue type is RX / 0 */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
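
/*
 * Resulting interrupt cause linkage for each queue pair i (serviced by
 * MSI-X vector i + 1), as programmed above:
 *
 *	PFINT_LNKLSTN(i) -> RX queue i -> TX queue i -> IXL_QUEUE_EOL
 *
 * so a single vector covers both directions of a queue pair.
 */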

/*
 * Configure for MSI single vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring	*rxr = &que->rxr;
	struct tx_ring	*txr = &que->txr;
	u32 reg;

	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}
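
/*
 * In this single-vector mode everything shares vector 0: LNKLST0 points
 * at queue 0 (RX), whose RQCTL chains to TX queue 0, whose TQCTL
 * terminates the list with IXL_QUEUE_EOL.
 */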

int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int		rid;
	struct i40e_hw *hw = &pf->hw;
	device_t	dev = pf->dev;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;

	return (0);
}
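
/*
 * Note on the hw_addr assignment above: rather than a CPU-mapped BAR
 * address, hw_addr is pointed at the bus space handle; the shared
 * code's register accessors are expected to recover the osdep (via
 * hw->back) and issue bus_space reads/writes. This describes this
 * driver's osdep convention, not a general i40e requirement.
 */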

/*
 * Teardown and release the admin queue/misc vector
 * interrupt.
 */
int
ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	int			rid, error = 0;

	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		rid = (pf->msix != 0) ? 1 : 0;

	if (pf->tag != NULL) {
		error = bus_teardown_intr(dev, pf->res, pf->tag);
		if (error) {
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");
			// return (ENXIO);
		}
		pf->tag = NULL;
	}
	if (pf->res != NULL) {
		error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
		if (error) {
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed [rid=%d]\n", rid);
			// return (ENXIO);
		}
		pf->res = NULL;
	}

	return (0);
}

int
ixl_teardown_queue_msix(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	int			rid, error = 0;

	/* We may get here before stations are setup */
	if ((pf->msix < 2) || (que == NULL))
		return (0);

	/* Release all MSIX queue resources */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			error = bus_teardown_intr(dev, que->res, que->tag);
			if (error) {
				device_printf(dev, "bus_teardown_intr() for"
				    " Queue %d interrupt failed\n",
				    que->me);
				// return (ENXIO);
			}
			que->tag = NULL;
		}
		if (que->res != NULL) {
			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			if (error) {
				device_printf(dev, "bus_release_resource() for"
				    " Queue %d interrupt failed [rid=%d]\n",
				    que->me, rid);
				// return (ENXIO);
			}
			que->res = NULL;
		}
	}

	return (0);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	int			memrid;

	ixl_teardown_queue_msix(&pf->vsi);
	ixl_teardown_adminq_msix(pf);

	if (pf->msix > 0)
		pci_release_msi(dev);

	memrid = PCIR_BAR(IXL_MSIX_BAR);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);

	return;
}
1802 
1803 void
1804 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
1805 {
1806 	/* Display supported media types */
1807 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
1808 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1809 
1810 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
1811 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1812 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
1813 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1814 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
1815 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1816 
1817 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
1818 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
1819 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
1820 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1821 
1822 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
1823 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1824 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
1825 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1826 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
1827 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1828 
1829 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
1830 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
1831 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
1832 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
1833 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1834 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1835 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
1836 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1837 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
1838 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1839 
1840 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
1841 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1842 
1843 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
1844 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
1845 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1846 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
1847 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
1848 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
1849 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1850 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
1851 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1852 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
1853 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1854 
1855 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
1856 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1857 
1858 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1859 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1860 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
1861 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1862 
1863 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1864 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1865 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1866 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1867 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1868 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1869 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1870 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL); /* no IFM_25G_LR media type yet */
1871 }
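/*
 * Editor's sketch (hypothetical, not part of the driver): the chain of
 * bit tests above is a one-to-many mapping from AQ PHY type bits to
 * ifmedia words. The same mapping could be expressed as data; a minimal
 * table-driven equivalent for a few entries, reusing the driver's own
 * macros, might look like this:
 */
#if 0
static const struct {
	u64	phy_mask;	/* one or more I40E_CAP_PHY_TYPE_* bits */
	int	media;		/* matching IFM_* subtype */
} ixl_phy_media_map[] = {
	{ I40E_CAP_PHY_TYPE_100BASE_TX,		IFM_100_TX },
	{ I40E_CAP_PHY_TYPE_1000BASE_T,		IFM_1000_T },
	{ I40E_CAP_PHY_TYPE_XAUI | I40E_CAP_PHY_TYPE_XFI |
	  I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_TWINAX },
	{ I40E_CAP_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
};

static void
ixl_add_ifmedia_table(struct ixl_vsi *vsi, u64 phy_types)
{
	for (int i = 0; i < nitems(ixl_phy_media_map); i++)
		if (phy_types & ixl_phy_media_map[i].phy_mask)
			ifmedia_add(&vsi->media,
			    IFM_ETHER | ixl_phy_media_map[i].media, 0, NULL);
}
#endif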
1872 
1873 /*********************************************************************
1874  *
1875  *  Setup networking device structure and register an interface.
1876  *
1877  **********************************************************************/
1878 int
1879 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1880 {
1881 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
1882 	struct ifnet		*ifp;
1883 	struct i40e_hw		*hw = vsi->hw;
1884 	struct ixl_queue	*que = vsi->queues;
1885 	struct i40e_aq_get_phy_abilities_resp abilities;
1886 	enum i40e_status_code aq_error = 0;
1887 
1888 	INIT_DEBUGOUT("ixl_setup_interface: begin");
1889 
1890 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
1891 	if (ifp == NULL) {
1892 		device_printf(dev, "cannot allocate ifnet structure\n");
1893 		return (-1);
1894 	}
1895 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1896 	ifp->if_mtu = ETHERMTU;
1897 	ifp->if_init = ixl_init;
1898 	ifp->if_softc = vsi;
1899 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1900 	ifp->if_ioctl = ixl_ioctl;
1901 
1902 #if __FreeBSD_version >= 1100036
1903 	if_setgetcounterfn(ifp, ixl_get_counter);
1904 #endif
1905 
1906 	ifp->if_transmit = ixl_mq_start;
1907 
1908 	ifp->if_qflush = ixl_qflush;
1909 
1910 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1911 
1912 	vsi->max_frame_size =
1913 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1914 	    + ETHER_VLAN_ENCAP_LEN;
1915 
1916 	/* Set TSO limits */
1917 	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1918 	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1919 	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1920 
1921 	/*
1922 	 * Tell the upper layer(s) we support long frames.
1923 	 */
1924 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1925 
1926 	ifp->if_capabilities |= IFCAP_HWCSUM;
1927 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1928 	ifp->if_capabilities |= IFCAP_TSO;
1929 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1930 	ifp->if_capabilities |= IFCAP_LRO;
1931 
1932 	/* VLAN capabilities */
1933 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1934 			     |  IFCAP_VLAN_HWTSO
1935 			     |  IFCAP_VLAN_MTU
1936 			     |  IFCAP_VLAN_HWCSUM;
1937 	ifp->if_capenable = ifp->if_capabilities;
1938 
1939 	/*
1940 	** Don't turn this on by default, if vlans are
1941 	** Don't enable this by default: if VLANs are created on
1942 	** another pseudo device (e.g. lagg), VLAN events are not
1943 	** passed through and hardware filtering breaks operation,
1944 	** but it works with HW filtering off. If VLANs are used
1945 	** directly on the ixl interface, this can be enabled for
1946 	** full hardware tag filtering.
1947 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1948 
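	/*
	 * Editor's usage note (not in the original source): HWFILTER is
	 * only advertised here, since if_capenable was latched above;
	 * an administrator opts in at runtime with, e.g.:
	 *
	 *	# ifconfig ixl0 vlanhwfilter
	 */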
1949 	/*
1950 	 * Specify the media types supported by this adapter and register
1951 	 * callbacks to update media and link information
1952 	 */
1953 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1954 		     ixl_media_status);
1955 
1956 	aq_error = i40e_aq_get_phy_capabilities(hw,
1957 	    FALSE, TRUE, &abilities, NULL);
1958 	/* May need delay to detect fiber correctly */
1959 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1960 		i40e_msec_delay(200);
1961 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1962 		    TRUE, &abilities, NULL);
1963 	}
1964 	if (aq_error) {
1965 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1966 			device_printf(dev, "Unknown PHY type detected!\n");
1967 		else
1968 			device_printf(dev,
1969 			    "Error getting supported media types, err %d,"
1970 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1971 		return (0);
1972 	}
1973 	pf->supported_speeds = abilities.link_speed;
1974 	ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
1975 
1976 	ixl_add_ifmedia(vsi, hw->phy.phy_types);
1977 
1978 	/* Use autoselect media by default */
1979 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1980 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1981 
1982 	ether_ifattach(ifp, hw->mac.addr);
1983 
1984 	return (0);
1985 }
1986 
1987 /*
1988 ** Run when the Admin Queue gets a link state change interrupt.
1989 */
1990 void
1991 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1992 {
1993 	struct i40e_hw	*hw = &pf->hw;
1994 	device_t dev = pf->dev;
1995 	struct i40e_aqc_get_link_status *status =
1996 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1997 
1998 	/* Request link status from adapter */
1999 	hw->phy.get_link_info = TRUE;
2000 	i40e_get_link_status(hw, &pf->link_up);
2001 
2002 	/* Print out message if an unqualified module is found */
2003 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2004 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2005 	    (!(status->link_info & I40E_AQ_LINK_UP)))
2006 		device_printf(dev, "Link failed because "
2007 		    "an unqualified module was detected!\n");
2008 
2009 	/* Update OS link info */
2010 	ixl_update_link_status(pf);
2011 }
2012 
2013 /*********************************************************************
2014  *
2015  *  Get Firmware Switch configuration
2016  *	- this will need to be more robust when more complex
2017  *	  switch configurations are enabled.
2018  *
2019  **********************************************************************/
2020 int
2021 ixl_switch_config(struct ixl_pf *pf)
2022 {
2023 	struct i40e_hw	*hw = &pf->hw;
2024 	struct ixl_vsi	*vsi = &pf->vsi;
2025 	device_t 	dev = vsi->dev;
2026 	struct i40e_aqc_get_switch_config_resp *sw_config;
2027 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2028 	int	ret;
2029 	u16	next = 0;
2030 
2031 	memset(&aq_buf, 0, sizeof(aq_buf));
2032 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2033 	ret = i40e_aq_get_switch_config(hw, sw_config,
2034 	    sizeof(aq_buf), &next, NULL);
2035 	if (ret) {
2036 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
2037 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
2038 		return (ret);
2039 	}
2040 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
2041 		device_printf(dev,
2042 		    "Switch config: header reported: %d in structure, %d total\n",
2043 		    sw_config->header.num_reported, sw_config->header.num_total);
2044 		for (int i = 0; i < sw_config->header.num_reported; i++) {
2045 			device_printf(dev,
2046 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2047 			    sw_config->element[i].element_type,
2048 			    sw_config->element[i].seid,
2049 			    sw_config->element[i].uplink_seid,
2050 			    sw_config->element[i].downlink_seid);
2051 		}
2052 	}
2053 	/* Simplified due to a single VSI */
2054 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2055 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2056 	vsi->seid = sw_config->element[0].seid;
2057 	return (ret);
2058 }
2059 
2060 /*********************************************************************
2061  *
2062  *  Initialize the VSI:  this handles contexts, which means things
2063  *  			 like the number of descriptors and buffer size;
2064  *			 we also init the rings through this function.
2065  *
2066  **********************************************************************/
2067 int
2068 ixl_initialize_vsi(struct ixl_vsi *vsi)
2069 {
2070 	struct ixl_pf		*pf = vsi->back;
2071 	struct ixl_queue	*que = vsi->queues;
2072 	device_t		dev = vsi->dev;
2073 	struct i40e_hw		*hw = vsi->hw;
2074 	struct i40e_vsi_context	ctxt;
2075 	int 			tc_queues;
2076 	int			err = 0;
2077 
2078 	memset(&ctxt, 0, sizeof(ctxt));
2079 	ctxt.seid = vsi->seid;
2080 	if (pf->veb_seid != 0)
2081 		ctxt.uplink_seid = pf->veb_seid;
2082 	ctxt.pf_num = hw->pf_id;
2083 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2084 	if (err) {
2085 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
2086 		    " aq_error %d\n", err, hw->aq.asq_last_status);
2087 		return (err);
2088 	}
2089 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
2090 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2091 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2092 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2093 	    ctxt.uplink_seid, ctxt.vsi_number,
2094 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2095 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2096 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2097 	/*
2098 	** Set the queue and traffic class bits
2099 	**  - when multiple traffic classes are supported
2100 	**    this will need to be more robust.
2101 	*/
2102 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2103 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2104 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
2105 	ctxt.info.queue_mapping[0] = 0;
2106 	/*
2107 	 * This VSI will only use traffic class 0; start traffic class 0's
2108 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
2109 	 * the driver may not use all of them).
2110 	 */
2111 	tc_queues = bsrl(pf->qtag.num_allocated);
2112 	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2113 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2114 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2115 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
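	/*
	 * Editor's worked example (assuming 8 allocated queues):
	 * bsrl(8) == 3, so the TC0 entry encodes queue offset 0 and a
	 * queue-count exponent of 3, i.e. 2^3 = 8 queues starting at 0.
	 */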
2116 
2117 	/* Set VLAN receive stripping mode */
2118 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2119 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2120 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2121 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2122 	else
2123 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2124 
2125 #ifdef IXL_IW
2126 	/* Set TCP Enable for iWARP capable VSI */
2127 	if (ixl_enable_iwarp && pf->iw_enabled) {
2128 		ctxt.info.valid_sections |=
2129 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
2130 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
2131 	}
2132 #endif
2133 	/* Save VSI number and info for use later */
2134 	vsi->vsi_num = ctxt.vsi_number;
2135 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2136 
2137 	/* Reset VSI statistics */
2138 	ixl_vsi_reset_stats(vsi);
2139 	vsi->hw_filters_add = 0;
2140 	vsi->hw_filters_del = 0;
2141 
2142 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2143 
2144 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2145 	if (err) {
2146 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2147 		    " aq_error %d\n", err, hw->aq.asq_last_status);
2148 		return (err);
2149 	}
2150 
2151 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2152 		struct tx_ring		*txr = &que->txr;
2153 		struct rx_ring 		*rxr = &que->rxr;
2154 		struct i40e_hmc_obj_txq tctx;
2155 		struct i40e_hmc_obj_rxq rctx;
2156 		u32			txctl;
2157 		u16			size;
2158 
2159 		/* Setup the HMC TX Context  */
2160 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2161 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2162 		tctx.new_context = 1;
2163 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2164 		tctx.qlen = que->num_desc;
2165 		tctx.fc_ena = 0;
2166 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2167 		/* Enable HEAD writeback */
2168 		tctx.head_wb_ena = 1;
2169 		tctx.head_wb_addr = txr->dma.pa +
2170 		    (que->num_desc * sizeof(struct i40e_tx_desc));
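		/*
		 * Editor's note: the write-back slot sits just past the
		 * last descriptor; ixl_setup_queue() reserves room for it
		 * by adding sizeof(u32) to the ring allocation size.
		 */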
2171 		tctx.rdylist_act = 0;
2172 		err = i40e_clear_lan_tx_queue_context(hw, i);
2173 		if (err) {
2174 			device_printf(dev, "Unable to clear TX context\n");
2175 			break;
2176 		}
2177 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2178 		if (err) {
2179 			device_printf(dev, "Unable to set TX context\n");
2180 			break;
2181 		}
2182 		/* Associate the ring with this PF */
2183 		txctl = I40E_QTX_CTL_PF_QUEUE;
2184 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2185 		    I40E_QTX_CTL_PF_INDX_MASK);
2186 		wr32(hw, I40E_QTX_CTL(i), txctl);
2187 		ixl_flush(hw);
2188 
2189 		/* Do ring (re)init */
2190 		ixl_init_tx_ring(que);
2191 
2192 		/* Next setup the HMC RX Context  */
2193 		if (vsi->max_frame_size <= MCLBYTES)
2194 			rxr->mbuf_sz = MCLBYTES;
2195 		else
2196 			rxr->mbuf_sz = MJUMPAGESIZE;
2197 
2198 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2199 
2200 		/* Set up an RX context for the HMC */
2201 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2202 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2203 		/* ignore header split for now */
2204 		rctx.hbuff = 0;
2205 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2206 		    vsi->max_frame_size : max_rxmax;
2207 		rctx.dtype = 0;
2208 		rctx.dsize = 1;	/* do 32byte descriptors */
2209 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2210 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2211 		rctx.qlen = que->num_desc;
2212 		rctx.tphrdesc_ena = 1;
2213 		rctx.tphwdesc_ena = 1;
2214 		rctx.tphdata_ena = 0;
2215 		rctx.tphhead_ena = 0;
2216 		rctx.lrxqthresh = 2;
2217 		rctx.crcstrip = 1;
2218 		rctx.l2tsel = 1;
2219 		rctx.showiv = 1;
2220 		rctx.fc_ena = 0;
2221 		rctx.prefena = 1;
2222 
2223 		err = i40e_clear_lan_rx_queue_context(hw, i);
2224 		if (err) {
2225 			device_printf(dev,
2226 			    "Unable to clear RX context %d\n", i);
2227 			break;
2228 		}
2229 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2230 		if (err) {
2231 			device_printf(dev, "Unable to set RX context %d\n", i);
2232 			break;
2233 		}
2234 		err = ixl_init_rx_ring(que);
2235 		if (err) {
2236 			device_printf(dev, "Failed to init RX ring %d\n", i);
2237 			break;
2238 		}
2239 #ifdef DEV_NETMAP
2240 		/* preserve queue */
2241 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2242 			struct netmap_adapter *na = NA(vsi->ifp);
2243 			struct netmap_kring *kring = &na->rx_rings[i];
2244 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2245 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2246 		} else
2247 #endif /* DEV_NETMAP */
2248 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2249 	}
2250 	return (err);
2251 }
2252 
2253 
2254 /*********************************************************************
2255  *
2256  *  Free all VSI structs.
2257  *
2258  **********************************************************************/
2259 void
2260 ixl_free_vsi(struct ixl_vsi *vsi)
2261 {
2262 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2263 	struct ixl_queue	*que = vsi->queues;
2264 
2265 	/* Free station queues */
2266 	if (!vsi->queues)
2267 		goto free_filters;
2268 
2269 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2270 		struct tx_ring *txr = &que->txr;
2271 		struct rx_ring *rxr = &que->rxr;
2272 
2273 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2274 			continue;
2275 		IXL_TX_LOCK(txr);
2276 		ixl_free_que_tx(que);
2277 		if (txr->base)
2278 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2279 		IXL_TX_UNLOCK(txr);
2280 		IXL_TX_LOCK_DESTROY(txr);
2281 
2282 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2283 			continue;
2284 		IXL_RX_LOCK(rxr);
2285 		ixl_free_que_rx(que);
2286 		if (rxr->base)
2287 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2288 		IXL_RX_UNLOCK(rxr);
2289 		IXL_RX_LOCK_DESTROY(rxr);
2290 	}
2291 	free(vsi->queues, M_DEVBUF);
2292 
2293 free_filters:
2294 	/* Free VSI filter list */
2295 	ixl_free_mac_filters(vsi);
2296 }
2297 
2298 void
2299 ixl_free_mac_filters(struct ixl_vsi *vsi)
2300 {
2301 	struct ixl_mac_filter *f;
2302 
2303 	while (!SLIST_EMPTY(&vsi->ftl)) {
2304 		f = SLIST_FIRST(&vsi->ftl);
2305 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2306 		free(f, M_DEVBUF);
2307 	}
2308 }
2309 
2310 /*
2311  * Fill out fields in queue struct and setup tx/rx memory and structs
2312  */
2313 static int
2314 ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
2315 {
2316 	device_t dev = pf->dev;
2317 	struct i40e_hw *hw = &pf->hw;
2318 	struct ixl_vsi *vsi = &pf->vsi;
2319 	struct tx_ring *txr = &que->txr;
2320 	struct rx_ring *rxr = &que->rxr;
2321 	int error = 0;
2322 	int rsize, tsize;
2323 
2324 	que->num_desc = pf->ringsz;
2325 	que->me = index;
2326 	que->vsi = vsi;
2327 
2328 	txr->que = que;
2329 	txr->tail = I40E_QTX_TAIL(que->me);
2330 
2331 	/* Initialize the TX lock */
2332 	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2333 	    device_get_nameunit(dev), que->me);
2334 	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2335 	/* Create the TX descriptor ring */
2336 	tsize = roundup2((que->num_desc *
2337 	    sizeof(struct i40e_tx_desc)) +
2338 	    sizeof(u32), DBA_ALIGN);
2339 	if (i40e_allocate_dma_mem(hw,
2340 	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2341 		device_printf(dev,
2342 		    "Unable to allocate TX Descriptor memory\n");
2343 		error = ENOMEM;
2344 		goto fail;
2345 	}
2346 	txr->base = (struct i40e_tx_desc *)txr->dma.va;
2347 	bzero((void *)txr->base, tsize);
2348 	/* Now allocate transmit soft structs for the ring */
2349 	if (ixl_allocate_tx_data(que)) {
2350 		device_printf(dev,
2351 		    "Critical Failure setting up TX structures\n");
2352 		error = ENOMEM;
2353 		goto fail;
2354 	}
2355 	/* Allocate a buf ring */
2356 	txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2357 	    M_NOWAIT, &txr->mtx);
2358 	if (txr->br == NULL) {
2359 		device_printf(dev,
2360 		    "Critical Failure setting up TX buf ring\n");
2361 		error = ENOMEM;
2362 		goto fail;
2363 	}
2364 
2365 	rsize = roundup2(que->num_desc *
2366 	    sizeof(union i40e_rx_desc), DBA_ALIGN);
2367 	rxr->que = que;
2368 	rxr->tail = I40E_QRX_TAIL(que->me);
2369 
2370 	/* Initialize the RX side lock */
2371 	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2372 	    device_get_nameunit(dev), que->me);
2373 	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2374 
2375 	if (i40e_allocate_dma_mem(hw,
2376 	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2377 		device_printf(dev,
2378 		    "Unable to allocate RX Descriptor memory\n");
2379 		error = ENOMEM;
2380 		goto fail;
2381 	}
2382 	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2383 	bzero((void *)rxr->base, rsize);
2384 	/* Allocate receive soft structs for the ring*/
2385 	if (ixl_allocate_rx_data(que)) {
2386 		device_printf(dev,
2387 		    "Critical Failure setting up receive structs\n");
2388 		error = ENOMEM;
2389 		goto fail;
2390 	}
2391 
2392 	return (0);
2393 fail:
2394 	if (rxr->base)
2395 		i40e_free_dma_mem(&pf->hw, &rxr->dma);
2396 	if (mtx_initialized(&rxr->mtx))
2397 		mtx_destroy(&rxr->mtx);
2398 	if (txr->br) {
2399 		buf_ring_free(txr->br, M_DEVBUF);
2400 		txr->br = NULL;
2401 	}
2402 	if (txr->base)
2403 		i40e_free_dma_mem(&pf->hw, &txr->dma);
2404 	if (mtx_initialized(&txr->mtx))
2405 		mtx_destroy(&txr->mtx);
2406 
2407 	return (error);
2408 }
2409 
2410 /*********************************************************************
2411  *
2412  *  Allocate memory for the VSI (virtual station interface) and its
2413  *  associated queues, rings, and the descriptors for each; called
2414  *  only once at attach.
2415  *
2416  **********************************************************************/
2417 int
2418 ixl_setup_stations(struct ixl_pf *pf)
2419 {
2420 	device_t		dev = pf->dev;
2421 	struct ixl_vsi		*vsi;
2422 	struct ixl_queue	*que;
2423 	int			error = 0;
2424 
2425 	vsi = &pf->vsi;
2427 	vsi->hw = &pf->hw;
2428 	vsi->id = 0;
2429 	vsi->num_vlans = 0;
2430 	vsi->back = pf;
2431 
2432 	/* Get memory for the station queues */
2433 	vsi->queues = malloc(sizeof(struct ixl_queue) * vsi->num_queues,
2434 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2435 	if (vsi->queues == NULL) {
2436 		device_printf(dev, "Unable to allocate queue memory\n");
2437 		error = ENOMEM;
2438 		return (error);
2439 	}
2440 
2441 	/* Then setup each queue */
2442 	for (int i = 0; i < vsi->num_queues; i++) {
2443 		que = &vsi->queues[i];
2444 		error = ixl_setup_queue(que, pf, i);
2445 		if (error)
2446 			return (error);
2447 	}
2448 
2449 	return (0);
2450 }
2451 
2452 /*
2453 ** Provide an update to the queue RX
2454 ** interrupt moderation value.
2455 */
2456 void
2457 ixl_set_queue_rx_itr(struct ixl_queue *que)
2458 {
2459 	struct ixl_vsi	*vsi = que->vsi;
2460 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
2461 	struct i40e_hw	*hw = vsi->hw;
2462 	struct rx_ring	*rxr = &que->rxr;
2463 	u16		rx_itr;
2464 	u16		rx_latency = 0;
2465 	int		rx_bytes;
2466 
2467 	/* Idle, do nothing */
2468 	if (rxr->bytes == 0)
2469 		return;
2470 
2471 	if (pf->dynamic_rx_itr) {
2472 		rx_bytes = rxr->bytes/rxr->itr;
2473 		rx_itr = rxr->itr;
2474 
2475 		/* Adjust latency range */
2476 		switch (rxr->latency) {
2477 		case IXL_LOW_LATENCY:
2478 			if (rx_bytes > 10) {
2479 				rx_latency = IXL_AVE_LATENCY;
2480 				rx_itr = IXL_ITR_20K;
2481 			}
2482 			break;
2483 		case IXL_AVE_LATENCY:
2484 			if (rx_bytes > 20) {
2485 				rx_latency = IXL_BULK_LATENCY;
2486 				rx_itr = IXL_ITR_8K;
2487 			} else if (rx_bytes <= 10) {
2488 				rx_latency = IXL_LOW_LATENCY;
2489 				rx_itr = IXL_ITR_100K;
2490 			}
2491 			break;
2492 		case IXL_BULK_LATENCY:
2493 			if (rx_bytes <= 20) {
2494 				rx_latency = IXL_AVE_LATENCY;
2495 				rx_itr = IXL_ITR_20K;
2496 			}
2497 			break;
2498 		}
2499 
2500 		rxr->latency = rx_latency;
2501 
2502 		if (rx_itr != rxr->itr) {
2503 			/* do an exponential smoothing */
2504 			rx_itr = (10 * rx_itr * rxr->itr) /
2505 			    ((9 * rx_itr) + rxr->itr);
2506 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
2507 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2508 			    que->me), rxr->itr);
2509 		}
2510 	} else { /* We may have toggled to non-dynamic */
2511 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2512 			vsi->rx_itr_setting = pf->rx_itr;
2513 		/* Update the hardware if needed */
2514 		if (rxr->itr != vsi->rx_itr_setting) {
2515 			rxr->itr = vsi->rx_itr_setting;
2516 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2517 			    que->me), rxr->itr);
2518 		}
2519 	}
2520 	rxr->bytes = 0;
2521 	rxr->packets = 0;
2522 	return;
2523 }
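/*
 * Editor's note (worked example): the smoothing step above is a weighted
 * harmonic mean that moves the programmed ITR roughly 10% of the way
 * toward the new target per call. With a current ITR of 100 and a target
 * of 50:
 *
 *	(10 * 50 * 100) / ((9 * 50) + 100) = 50000 / 550 ~= 90
 *
 * so repeated calls converge on the target without abrupt jumps.
 */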
2524 
2525 
2526 /*
2527 ** Provide a update to the queue TX
2528 ** Provide an update to the queue TX
2529 */
2530 void
2531 ixl_set_queue_tx_itr(struct ixl_queue *que)
2532 {
2533 	struct ixl_vsi	*vsi = que->vsi;
2534 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
2535 	struct i40e_hw	*hw = vsi->hw;
2536 	struct tx_ring	*txr = &que->txr;
2537 	u16		tx_itr;
2538 	u16		tx_latency = 0;
2539 	int		tx_bytes;
2540 
2541 
2542 	/* Idle, do nothing */
2543 	if (txr->bytes == 0)
2544 		return;
2545 
2546 	if (pf->dynamic_tx_itr) {
2547 		tx_bytes = txr->bytes/txr->itr;
2548 		tx_itr = txr->itr;
2549 
2550 		switch (txr->latency) {
2551 		case IXL_LOW_LATENCY:
2552 			if (tx_bytes > 10) {
2553 				tx_latency = IXL_AVE_LATENCY;
2554 				tx_itr = IXL_ITR_20K;
2555 			}
2556 			break;
2557 		case IXL_AVE_LATENCY:
2558 			if (tx_bytes > 20) {
2559 				tx_latency = IXL_BULK_LATENCY;
2560 				tx_itr = IXL_ITR_8K;
2561 			} else if (tx_bytes <= 10) {
2562 				tx_latency = IXL_LOW_LATENCY;
2563 				tx_itr = IXL_ITR_100K;
2564 			}
2565 			break;
2566 		case IXL_BULK_LATENCY:
2567 			if (tx_bytes <= 20) {
2568 				tx_latency = IXL_AVE_LATENCY;
2569 				tx_itr = IXL_ITR_20K;
2570 			}
2571 			break;
2572 		}
2573 
2574 		txr->latency = tx_latency;
2575 
2576 		if (tx_itr != txr->itr) {
2577 			/* do an exponential smoothing */
2578 			tx_itr = (10 * tx_itr * txr->itr) /
2579 			    ((9 * tx_itr) + txr->itr);
2580 			txr->itr = min(tx_itr, IXL_MAX_ITR);
2581 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2582 			    que->me), txr->itr);
2583 		}
2584 
2585 	} else { /* We may have toggled to non-dynamic */
2586 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2587 			vsi->tx_itr_setting = pf->tx_itr;
2588 		/* Update the hardware if needed */
2589 		if (txr->itr != vsi->tx_itr_setting) {
2590 			txr->itr = vsi->tx_itr_setting;
2591 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2592 			    que->me), txr->itr);
2593 		}
2594 	}
2595 	txr->bytes = 0;
2596 	txr->packets = 0;
2597 	return;
2598 }
2599 
2600 void
2601 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2602     struct sysctl_ctx_list *ctx, const char *sysctl_name)
2603 {
2604 	struct sysctl_oid *tree;
2605 	struct sysctl_oid_list *child;
2606 	struct sysctl_oid_list *vsi_list;
2607 
2608 	tree = device_get_sysctl_tree(pf->dev);
2609 	child = SYSCTL_CHILDREN(tree);
2610 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2611 				   CTLFLAG_RD, NULL, "VSI Number");
2612 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2613 
2614 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2615 }
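/*
 * Editor's note (illustrative, assuming device unit 0): for the PF VSI
 * this creates a sysctl node such as dev.ixl.0.pf, so the ethernet
 * statistics attached below it can be read with, e.g.:
 *
 *	# sysctl dev.ixl.0.pf.good_octets_rcvd
 */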
2616 
2617 #ifdef IXL_DEBUG
2618 /**
2619  * ixl_sysctl_qtx_tail_handler
2620  * Retrieves I40E_QTX_TAIL value from hardware
2621  * for a sysctl.
2622  */
2623 static int
2624 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2625 {
2626 	struct ixl_queue *que;
2627 	int error;
2628 	u32 val;
2629 
2630 	que = ((struct ixl_queue *)oidp->oid_arg1);
2631 	if (!que) return (0);
2632 
2633 	val = rd32(que->vsi->hw, que->txr.tail);
2634 	error = sysctl_handle_int(oidp, &val, 0, req);
2635 	if (error || !req->newptr)
2636 		return (error);
2637 	return (0);
2638 }
2639 
2640 /**
2641  * ixl_sysctl_qrx_tail_handler
2642  * Retrieves I40E_QRX_TAIL value from hardware
2643  * for a sysctl.
2644  */
2645 static int
2646 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2647 {
2648 	struct ixl_queue *que;
2649 	int error;
2650 	u32 val;
2651 
2652 	que = ((struct ixl_queue *)oidp->oid_arg1);
2653 	if (!que) return (0);
2654 
2655 	val = rd32(que->vsi->hw, que->rxr.tail);
2656 	error = sysctl_handle_int(oidp, &val, 0, req);
2657 	if (error || !req->newptr)
2658 		return (error);
2659 	return (0);
2660 }
2661 #endif
2662 
2663 /*
2664  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2665  * Writes to the ITR registers immediately.
2666  */
2667 static int
2668 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2669 {
2670 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2671 	device_t dev = pf->dev;
2672 	int error = 0;
2673 	int requested_tx_itr;
2674 
2675 	requested_tx_itr = pf->tx_itr;
2676 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2677 	if ((error) || (req->newptr == NULL))
2678 		return (error);
2679 	if (pf->dynamic_tx_itr) {
2680 		device_printf(dev,
2681 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
2682 		return (EINVAL);
2683 	}
2684 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2685 		device_printf(dev,
2686 		    "Invalid TX itr value; value must be between 0 and %d\n",
2687 		    IXL_MAX_ITR);
2688 		return (EINVAL);
2689 	}
2690 
2691 	pf->tx_itr = requested_tx_itr;
2692 	ixl_configure_tx_itr(pf);
2693 
2694 	return (error);
2695 }
2696 
2697 /*
2698  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2699  * Writes to the ITR registers immediately.
2700  */
2701 static int
2702 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2703 {
2704 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2705 	device_t dev = pf->dev;
2706 	int error = 0;
2707 	int requested_rx_itr;
2708 
2709 	requested_rx_itr = pf->rx_itr;
2710 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2711 	if ((error) || (req->newptr == NULL))
2712 		return (error);
2713 	if (pf->dynamic_rx_itr) {
2714 		device_printf(dev,
2715 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
2716 		return (EINVAL);
2717 	}
2718 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2719 		device_printf(dev,
2720 		    "Invalid RX itr value; value must be between 0 and %d\n",
2721 		    IXL_MAX_ITR);
2722 		return (EINVAL);
2723 	}
2724 
2725 	pf->rx_itr = requested_rx_itr;
2726 	ixl_configure_rx_itr(pf);
2727 
2728 	return (error);
2729 }
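/*
 * Editor's usage sketch (assumes these handlers are registered as
 * dev.ixl.<unit>.tx_itr and dev.ixl.<unit>.rx_itr elsewhere in the
 * driver): a new interval can then be written at runtime, e.g.:
 *
 *	# sysctl dev.ixl.0.rx_itr=62
 *
 * and takes effect immediately via ixl_configure_rx_itr().
 */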
2730 
2731 void
2732 ixl_add_hw_stats(struct ixl_pf *pf)
2733 {
2734 	device_t dev = pf->dev;
2735 	struct ixl_vsi *vsi = &pf->vsi;
2736 	struct ixl_queue *queues = vsi->queues;
2737 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2738 
2739 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2740 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2741 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2742 	struct sysctl_oid_list *vsi_list;
2743 
2744 	struct sysctl_oid *queue_node;
2745 	struct sysctl_oid_list *queue_list;
2746 
2747 	struct tx_ring *txr;
2748 	struct rx_ring *rxr;
2749 	char queue_namebuf[QUEUE_NAME_LEN];
2750 
2751 	/* Driver statistics */
2752 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2753 			CTLFLAG_RD, &pf->watchdog_events,
2754 			"Watchdog timeouts");
2755 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2756 			CTLFLAG_RD, &pf->admin_irq,
2757 			"Admin Queue IRQ Handled");
2758 
2759 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2760 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2761 
2762 	/* Queue statistics */
2763 	for (int q = 0; q < vsi->num_queues; q++) {
2764 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2765 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2766 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2767 		queue_list = SYSCTL_CHILDREN(queue_node);
2768 
2769 		txr = &(queues[q].txr);
2770 		rxr = &(queues[q].rxr);
2771 
2772 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2773 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2774 				"m_defrag() failed");
2775 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2776 				CTLFLAG_RD, &(queues[q].irqs),
2777 				"irqs on this queue");
2778 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2779 				CTLFLAG_RD, &(queues[q].tso),
2780 				"TSO");
2781 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2782 				CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2783 				"Driver tx dma failure in xmit");
2784 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
2785 				CTLFLAG_RD, &(queues[q].mss_too_small),
2786 				"TSO sends with an MSS less than 64");
2787 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2788 				CTLFLAG_RD, &(txr->no_desc),
2789 				"Queue No Descriptor Available");
2790 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2791 				CTLFLAG_RD, &(txr->total_packets),
2792 				"Queue Packets Transmitted");
2793 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2794 				CTLFLAG_RD, &(txr->tx_bytes),
2795 				"Queue Bytes Transmitted");
2796 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2797 				CTLFLAG_RD, &(rxr->rx_packets),
2798 				"Queue Packets Received");
2799 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2800 				CTLFLAG_RD, &(rxr->rx_bytes),
2801 				"Queue Bytes Received");
2802 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2803 				CTLFLAG_RD, &(rxr->desc_errs),
2804 				"Queue Rx Descriptor Errors");
2805 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2806 				CTLFLAG_RD, &(rxr->itr), 0,
2807 				"Queue Rx ITR Interval");
2808 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2809 				CTLFLAG_RD, &(txr->itr), 0,
2810 				"Queue Tx ITR Interval");
2811 #ifdef IXL_DEBUG
2812 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2813 				CTLFLAG_RD, &(rxr->not_done),
2814 				"Queue Rx Descriptors not Done");
2815 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2816 				CTLFLAG_RD, &(rxr->next_refresh), 0,
2817 				"Queue Rx Next Descriptor to Refresh");
2818 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2819 				CTLFLAG_RD, &(rxr->next_check), 0,
2820 				"Queue Rx Next Descriptor to Check");
2821 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2822 				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2823 				sizeof(struct ixl_queue),
2824 				ixl_sysctl_qtx_tail_handler, "IU",
2825 				"Queue Transmit Descriptor Tail");
2826 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2827 				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2828 				sizeof(struct ixl_queue),
2829 				ixl_sysctl_qrx_tail_handler, "IU",
2830 				"Queue Receive Descriptor Tail");
2831 #endif
2832 	}
2833 
2834 	/* MAC stats */
2835 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2836 }
2837 
2838 void
2839 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2840 	struct sysctl_oid_list *child,
2841 	struct i40e_eth_stats *eth_stats)
2842 {
2843 	struct ixl_sysctl_info ctls[] =
2844 	{
2845 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2846 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2847 			"Unicast Packets Received"},
2848 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2849 			"Multicast Packets Received"},
2850 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2851 			"Broadcast Packets Received"},
2852 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2853 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2854 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2855 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
2856 			"Multicast Packets Transmitted"},
2857 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
2858 			"Broadcast Packets Transmitted"},
2859 		// end
2860 		{0,0,0}
2861 	};
2862 
2863 	struct ixl_sysctl_info *entry = ctls;
2864 	while (entry->stat != 0)
2865 	{
2866 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2867 				CTLFLAG_RD, entry->stat,
2868 				entry->description);
2869 		entry++;
2870 	}
2871 }
2872 
2873 void
2874 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2875 	struct sysctl_oid_list *child,
2876 	struct i40e_hw_port_stats *stats)
2877 {
2878 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2879 				    CTLFLAG_RD, NULL, "MAC Statistics");
2880 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2881 
2882 	struct i40e_eth_stats *eth_stats = &stats->eth;
2883 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2884 
2885 	struct ixl_sysctl_info ctls[] =
2886 	{
2887 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
2888 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2889 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2890 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2891 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2892 		/* Packet Reception Stats */
2893 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2894 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2895 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2896 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2897 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2898 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2899 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2900 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2901 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2902 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2903 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2904 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2905 		/* Packet Transmission Stats */
2906 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2907 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2908 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2909 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2910 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2911 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2912 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2913 		/* Flow control */
2914 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2915 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2916 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2917 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2918 		/* End */
2919 		{0,0,0}
2920 	};
2921 
2922 	struct ixl_sysctl_info *entry = ctls;
2923 	while (entry->stat != 0)
2924 	{
2925 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2926 				CTLFLAG_RD, entry->stat,
2927 				entry->description);
2928 		entry++;
2929 	}
2930 }
2931 
2932 void
2933 ixl_set_rss_key(struct ixl_pf *pf)
2934 {
2935 	struct i40e_hw *hw = &pf->hw;
2936 	struct ixl_vsi *vsi = &pf->vsi;
2937 	device_t	dev = pf->dev;
2938 	enum i40e_status_code status;
2939 #ifdef RSS
2940 	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
2941 #else
2942 	u32             rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
2943 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
2944 			    0x35897377, 0x328b25e1, 0x4fa98922,
2945 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
2946 			    0x0, 0x0, 0x0};
2947 #endif
2948 
2949 #ifdef RSS
2950         /* Fetch the configured RSS key */
2951         rss_getkey((uint8_t *) &rss_seed);
2952 #endif
2953 	/* Fill out hash function seed */
2954 	if (hw->mac.type == I40E_MAC_X722) {
2955 		struct i40e_aqc_get_set_rss_key_data key_data;
2956 		bcopy(rss_seed, key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
2957 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2958 		if (status)
2959 			device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
2960 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2961 	} else {
2962 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2963 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2964 	}
2965 }
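/*
 * Editor's note: the register path programs IXL_RSS_KEY_SIZE_REG (13)
 * 32-bit HKEY words, i.e. 52 bytes, while the X722 admin-queue path
 * consumes only the 40-byte standard key; the trailing zero words in
 * the static seed above exist to pad out the register-sized array.
 */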
2966 
2967 /*
2968  * Configure enabled PCTYPES for RSS.
2969  */
2970 void
2971 ixl_set_rss_pctypes(struct ixl_pf *pf)
2972 {
2973 	struct i40e_hw *hw = &pf->hw;
2974 	u64		set_hena = 0, hena;
2975 
2976 #ifdef RSS
2977 	u32		rss_hash_config;
2978 
2979 	rss_hash_config = rss_gethashconfig();
2980 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2981                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2982 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2983                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2984 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2985                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2986 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2987                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2988 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2989 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2990 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2991                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2992         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2993                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2994 #else
2995 	if (hw->mac.type == I40E_MAC_X722)
2996 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
2997 	else
2998 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2999 #endif
3000 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3001 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3002 	hena |= set_hena;
3003 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
3004 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3005 
3006 }
3007 
3008 void
3009 ixl_set_rss_hlut(struct ixl_pf *pf)
3010 {
3011 	struct i40e_hw	*hw = &pf->hw;
3012 	device_t	dev = pf->dev;
3013 	struct ixl_vsi *vsi = &pf->vsi;
3014 	int		i, que_id;
3015 	int		lut_entry_width;
3016 	u32		lut = 0;
3017 	enum i40e_status_code status;
3018 
3019 	if (hw->mac.type == I40E_MAC_X722)
3020 		lut_entry_width = 7;
3021 	else
3022 		lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
3023 
3024 	/* Populate the LUT with the available queues in round-robin fashion */
3025 	u8 hlut_buf[512];
3026 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
3027 #ifdef RSS
3028 		/*
3029 		 * Fetch the RSS bucket id for the given indirection entry.
3030 		 * Cap it at the number of configured buckets (which is
3031 		 * num_queues.)
3032 		 */
3033 		que_id = rss_get_indirection_to_bucket(i);
3034 		que_id = que_id % vsi->num_queues;
3035 #else
3036 		que_id = i % vsi->num_queues;
3037 #endif
3038 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
3039 		hlut_buf[i] = lut;
3040 	}
3041 
3042 	if (hw->mac.type == I40E_MAC_X722) {
3043 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
3044 		if (status)
3045 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
3046 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3047 	} else {
3048 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
3049 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
3050 		ixl_flush(hw);
3051 	}
3052 }
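/*
 * Editor's worked example: with a 512-entry RSS table and 4 queues, the
 * non-RSS path above fills hlut_buf with the repeating pattern
 * 0, 1, 2, 3, 0, 1, ... so flows hash uniformly across the queues; the
 * non-X722 path then packs those bytes four at a time into the
 * rss_table_size / 4 I40E_PFQF_HLUT registers.
 */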
3053 
3054 /*
3055 ** Set up the PF's RSS parameters.
3056 */
3057 void
3058 ixl_config_rss(struct ixl_pf *pf)
3059 {
3060 	ixl_set_rss_key(pf);
3061 	ixl_set_rss_pctypes(pf);
3062 	ixl_set_rss_hlut(pf);
3063 }
3064 
3065 /*
3066 ** This routine is run via a VLAN config EVENT; it
3067 ** enables us to use the HW Filter table since we can
3068 ** get the vlan id. This just creates the entry in the
3069 ** soft version of the VFTA; init will repopulate the
3070 ** real table.
3071 */
3072 void
3073 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3074 {
3075 	struct ixl_vsi	*vsi = ifp->if_softc;
3076 	struct i40e_hw	*hw = vsi->hw;
3077 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3078 
3079 	if (ifp->if_softc !=  arg)   /* Not our event */
3080 		return;
3081 
3082 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3083 		return;
3084 
3085 	IXL_PF_LOCK(pf);
3086 	++vsi->num_vlans;
3087 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3088 	IXL_PF_UNLOCK(pf);
3089 }
3090 
3091 /*
3092 ** This routine is run via a VLAN
3093 ** unconfig EVENT; it removes our entry
3094 ** from the soft VFTA.
3095 */
3096 void
3097 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3098 {
3099 	struct ixl_vsi	*vsi = ifp->if_softc;
3100 	struct i40e_hw	*hw = vsi->hw;
3101 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3102 
3103 	if (ifp->if_softc !=  arg)
3104 		return;
3105 
3106 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3107 		return;
3108 
3109 	IXL_PF_LOCK(pf);
3110 	--vsi->num_vlans;
3111 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3112 	IXL_PF_UNLOCK(pf);
3113 }
3114 
3115 /*
3116 ** This routine updates VLAN filters; called by init,
3117 ** it scans the filter table and then updates the HW
3118 ** after a soft reset.
3119 */
3120 void
3121 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3122 {
3123 	struct ixl_mac_filter	*f;
3124 	int			cnt = 0, flags;
3125 
3126 	if (vsi->num_vlans == 0)
3127 		return;
3128 	/*
3129 	** Scan the filter list for vlan entries,
3130 	** mark them for addition and then call
3131 	** for the AQ update.
3132 	*/
3133 	SLIST_FOREACH(f, &vsi->ftl, next) {
3134 		if (f->flags & IXL_FILTER_VLAN) {
3135 			f->flags |=
3136 			    (IXL_FILTER_ADD |
3137 			    IXL_FILTER_USED);
3138 			cnt++;
3139 		}
3140 	}
3141 	if (cnt == 0) {
3142 		printf("setup vlan: no filters found!\n");
3143 		return;
3144 	}
3145 	flags = IXL_FILTER_VLAN;
3146 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3147 	ixl_add_hw_filters(vsi, flags, cnt);
3148 	return;
3149 }
3150 
3151 /*
3152 ** Initialize filter list and add filters that the hardware
3153 ** needs to know about.
3154 **
3155 ** Requires VSI's filter list & seid to be set before calling.
3156 */
3157 void
3158 ixl_init_filters(struct ixl_vsi *vsi)
3159 {
3160 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3161 
3162 	/* Add broadcast address */
3163 	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3164 
3165 	/*
3166 	 * Prevent Tx flow control frames from being sent out by
3167 	 * non-firmware transmitters.
3168 	 * This affects every VSI in the PF.
3169 	 */
3170 	if (pf->enable_tx_fc_filter)
3171 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3172 }
3173 
3174 /*
3175 ** This routine adds multicast filters.
3176 */
3177 void
3178 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3179 {
3180 	struct ixl_mac_filter *f;
3181 
3182 	/* Does one already exist */
3183 	/* Does one already exist? */
3184 	if (f != NULL)
3185 		return;
3186 
3187 	f = ixl_get_filter(vsi);
3188 	if (f == NULL) {
3189 		printf("WARNING: no filter available!!\n");
3190 		return;
3191 	}
3192 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3193 	f->vlan = IXL_VLAN_ANY;
3194 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3195 	    | IXL_FILTER_MC);
3196 
3197 	return;
3198 }
3199 
3200 void
3201 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3202 {
3203 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3204 }
3205 
3206 /*
3207 ** This routine adds macvlan filters
3208 */
3209 void
3210 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3211 {
3212 	struct ixl_mac_filter	*f, *tmp;
3213 	struct ixl_pf		*pf;
3214 	device_t		dev;
3215 
3216 	DEBUGOUT("ixl_add_filter: begin");
3217 
3218 	pf = vsi->back;
3219 	dev = pf->dev;
3220 
3221 	/* Does one already exist? */
3222 	f = ixl_find_filter(vsi, macaddr, vlan);
3223 	if (f != NULL)
3224 		return;
3225 	/*
3226 	** If this is the first vlan being registered, we
3227 	** need to remove the ANY filter that indicates we are
3228 	** not in a vlan, and replace that with a 0 filter.
3229 	*/
3230 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3231 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3232 		if (tmp != NULL) {
3233 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3234 			ixl_add_filter(vsi, macaddr, 0);
3235 		}
3236 	}
3237 
3238 	f = ixl_get_filter(vsi);
3239 	if (f == NULL) {
3240 		device_printf(dev, "WARNING: no filter available!!\n");
3241 		return;
3242 	}
3243 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3244 	f->vlan = vlan;
3245 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3246 	if (f->vlan != IXL_VLAN_ANY)
3247 		f->flags |= IXL_FILTER_VLAN;
3248 	else
3249 		vsi->num_macs++;
3250 
3251 	ixl_add_hw_filters(vsi, f->flags, 1);
3252 	return;
3253 }
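/*
 * Editor's note (filter state walk-through): on an untagged interface
 * the MAC is tracked by a single IXL_VLAN_ANY filter. When the first
 * VLAN is registered, that ANY filter is swapped for a vlan-0 filter
 * plus one filter per tag; ixl_del_filter() below reverses the swap
 * once the last VLAN is removed.
 */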
3254 
3255 void
3256 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3257 {
3258 	struct ixl_mac_filter *f;
3259 
3260 	f = ixl_find_filter(vsi, macaddr, vlan);
3261 	if (f == NULL)
3262 		return;
3263 
3264 	f->flags |= IXL_FILTER_DEL;
3265 	ixl_del_hw_filters(vsi, 1);
3266 	vsi->num_macs--;
3267 
3268 	/* Check if this is the last vlan removal */
3269 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3270 		/* Switch back to a non-vlan filter */
3271 		ixl_del_filter(vsi, macaddr, 0);
3272 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3273 	}
3274 	return;
3275 }
3276 
3277 /*
3278 ** Find the filter with both matching mac addr and vlan id
3279 */
3280 struct ixl_mac_filter *
3281 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3282 {
3283 	struct ixl_mac_filter	*f;
3284 	bool			match = FALSE;
3285 
3286 	SLIST_FOREACH(f, &vsi->ftl, next) {
3287 		if (!cmp_etheraddr(f->macaddr, macaddr))
3288 			continue;
3289 		if (f->vlan == vlan) {
3290 			match = TRUE;
3291 			break;
3292 		}
3293 	}
3294 
3295 	if (!match)
3296 		f = NULL;
3297 	return (f);
3298 }
3299 
3300 /*
3301 ** This routine takes additions to the vsi filter
3302 ** table and creates an Admin Queue call to create
3303 ** the filters in the hardware.
3304 */
3305 void
3306 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3307 {
3308 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3309 	struct ixl_mac_filter	*f;
3310 	struct ixl_pf		*pf;
3311 	struct i40e_hw		*hw;
3312 	device_t		dev;
3313 	int			err, j = 0;
3314 
3315 	pf = vsi->back;
3316 	dev = pf->dev;
3317 	hw = &pf->hw;
3318 	IXL_PF_LOCK_ASSERT(pf);
3319 
3320 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3321 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3322 	if (a == NULL) {
3323 		device_printf(dev, "add_hw_filters failed to get memory\n");
3324 		return;
3325 	}
3326 
3327 	/*
3328 	** Scan the filter list, each time we find one
3329 	** we add it to the admin queue array and turn off
3330 	** the add bit.
3331 	*/
3332 	SLIST_FOREACH(f, &vsi->ftl, next) {
3333 		if (f->flags == flags) {
3334 			b = &a[j]; // a pox on fvl long names :)
3335 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3336 			if (f->vlan == IXL_VLAN_ANY) {
3337 				b->vlan_tag = 0;
3338 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3339 			} else {
3340 				b->vlan_tag = f->vlan;
3341 				b->flags = 0;
3342 			}
3343 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3344 			f->flags &= ~IXL_FILTER_ADD;
3345 			j++;
3346 		}
3347 		if (j == cnt)
3348 			break;
3349 	}
3350 	if (j > 0) {
3351 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3352 		if (err)
3353 			device_printf(dev, "aq_add_macvlan err %d, "
3354 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3355 		else
3356 			vsi->hw_filters_add += j;
3357 	}
3358 	free(a, M_DEVBUF);
3359 	return;
3360 }
3361 
3362 /*
3363 ** This routine takes removals in the vsi filter
3364 ** table and creates an Admin Queue call to delete
3365 ** the filters in the hardware.
3366 */
3367 void
3368 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3369 {
3370 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3371 	struct ixl_pf		*pf;
3372 	struct i40e_hw		*hw;
3373 	device_t		dev;
3374 	struct ixl_mac_filter	*f, *f_temp;
3375 	int			err, j = 0;
3376 
3377 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3378 
3379 	pf = vsi->back;
3380 	hw = &pf->hw;
3381 	dev = pf->dev;
3382 
3383 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3384 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3385 	if (d == NULL) {
3386 		device_printf(dev, "del_hw_filters failed to get memory\n");
3387 		return;
3388 	}
3389 
3390 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3391 		if (f->flags & IXL_FILTER_DEL) {
3392 			e = &d[j]; // a pox on fvl long names :)
3393 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3394 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3395 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3396 			/* delete entry from vsi list */
3397 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3398 			free(f, M_DEVBUF);
3399 			j++;
3400 		}
3401 		if (j == cnt)
3402 			break;
3403 	}
3404 	if (j > 0) {
3405 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3406 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3407 			int sc = 0;
3408 			for (int i = 0; i < j; i++)
3409 				sc += (!d[i].error_code);
3410 			vsi->hw_filters_del += sc;
3411 			device_printf(dev,
3412 			    "Failed to remove %d/%d filters, aq error %d\n",
3413 			    j - sc, j, hw->aq.asq_last_status);
3414 		} else
3415 			vsi->hw_filters_del += j;
3416 	}
3417 	free(d, M_DEVBUF);
3418 
3419 	DEBUGOUT("ixl_del_hw_filters: end\n");
3420 	return;
3421 }
3422 
3423 int
3424 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3425 {
3426 	struct i40e_hw	*hw = &pf->hw;
3427 	int		error = 0;
3428 	u32		reg;
3429 	u16		pf_qidx;
3430 
3431 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3432 
3433 	ixl_dbg(pf, IXL_DBG_EN_DIS,
3434 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3435 	    pf_qidx, vsi_qidx);
3436 
3437 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3438 
3439 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3440 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3441 	    I40E_QTX_ENA_QENA_STAT_MASK;
3442 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3443 	/* Verify the enable took */
3444 	for (int j = 0; j < 10; j++) {
3445 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3446 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3447 			break;
3448 		i40e_msec_delay(10);
3449 	}
3450 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3451 		device_printf(pf->dev, "TX queue %d still disabled!\n",
3452 		    pf_qidx);
3453 		error = ETIMEDOUT;
3454 	}
3455 
3456 	return (error);
3457 }
3458 
3459 int
3460 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3461 {
3462 	struct i40e_hw	*hw = &pf->hw;
3463 	int		error = 0;
3464 	u32		reg;
3465 	u16		pf_qidx;
3466 
3467 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3468 
3469 	ixl_dbg(pf, IXL_DBG_EN_DIS,
3470 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3471 	    pf_qidx, vsi_qidx);
3472 
3473 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3474 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3475 	    I40E_QRX_ENA_QENA_STAT_MASK;
3476 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3477 	/* Verify the enable took */
3478 	for (int j = 0; j < 10; j++) {
3479 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3480 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3481 			break;
3482 		i40e_msec_delay(10);
3483 	}
3484 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3485 		device_printf(pf->dev, "RX queue %d still disabled!\n",
3486 		    pf_qidx);
3487 		error = ETIMEDOUT;
3488 	}
3489 
3490 	return (error);
3491 }
3492 
3493 int
3494 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3495 {
3496 	int error = 0;
3497 
3498 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3499 	/* Called function already prints error message */
3500 	if (error)
3501 		return (error);
3502 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3503 	return (error);
3504 }
3505 
3506 /* For PF VSI only */
3507 int
3508 ixl_enable_rings(struct ixl_vsi *vsi)
3509 {
3510 	struct ixl_pf	*pf = vsi->back;
3511 	int		error = 0;
3512 
3513 	for (int i = 0; i < vsi->num_queues; i++) {
3514 		error = ixl_enable_ring(pf, &pf->qtag, i);
3515 		if (error)
3516 			return (error);
3517 	}
3518 
3519 	return (error);
3520 }
3521 
3522 int
3523 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3524 {
3525 	struct i40e_hw	*hw = &pf->hw;
3526 	int		error = 0;
3527 	u32		reg;
3528 	u16		pf_qidx;
3529 
3530 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3531 
3532 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3533 	i40e_usec_delay(500);
3534 
3535 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3536 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3537 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3538 	/* Verify the disable took */
3539 	for (int j = 0; j < 10; j++) {
3540 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3541 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3542 			break;
3543 		i40e_msec_delay(10);
3544 	}
3545 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3546 		device_printf(pf->dev, "TX queue %d still enabled!\n",
3547 		    pf_qidx);
3548 		error = ETIMEDOUT;
3549 	}
3550 
3551 	return (error);
3552 }
3553 
3554 int
3555 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3556 {
3557 	struct i40e_hw	*hw = &pf->hw;
3558 	int		error = 0;
3559 	u32		reg;
3560 	u16		pf_qidx;
3561 
3562 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3563 
3564 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3565 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3566 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3567 	/* Verify the disable took */
3568 	for (int j = 0; j < 10; j++) {
3569 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3570 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3571 			break;
3572 		i40e_msec_delay(10);
3573 	}
3574 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3575 		device_printf(pf->dev, "RX queue %d still enabled!\n",
3576 		    pf_qidx);
3577 		error = ETIMEDOUT;
3578 	}
3579 
3580 	return (error);
3581 }
3582 
3583 int
3584 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3585 {
3586 	int error = 0;
3587 
3588 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3589 	/* Called function already prints error message */
3590 	if (error)
3591 		return (error);
3592 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3593 	return (error);
3594 }
3595 
3596 /* For PF VSI only */
3597 int
3598 ixl_disable_rings(struct ixl_vsi *vsi)
3599 {
3600 	struct ixl_pf	*pf = vsi->back;
3601 	int		error = 0;
3602 
3603 	for (int i = 0; i < vsi->num_queues; i++) {
3604 		error = ixl_disable_ring(pf, &pf->qtag, i);
3605 		if (error)
3606 			return (error);
3607 	}
3608 
3609 	return (error);
3610 }
3611 
3612 /**
3613  * ixl_handle_mdd_event
3614  *
3615  * Called from interrupt handler to identify possibly malicious vfs
3616  * (But also detects events from the PF, as well)
3617  **/
3618 void
3619 ixl_handle_mdd_event(struct ixl_pf *pf)
3620 {
3621 	struct i40e_hw *hw = &pf->hw;
3622 	device_t dev = pf->dev;
3623 	bool mdd_detected = false;
3624 	bool pf_mdd_detected = false;
3625 	u32 reg;
3626 
3627 	/* find what triggered the MDD event */
3628 	reg = rd32(hw, I40E_GL_MDET_TX);
3629 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3630 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3631 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3632 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3633 				I40E_GL_MDET_TX_EVENT_SHIFT;
3634 		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3635 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3636 		device_printf(dev,
3637 		    "Malicious Driver Detection event %d"
3638 		    " on TX queue %d, pf number %d\n",
3639 		    event, queue, pf_num);
3640 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3641 		mdd_detected = true;
3642 	}
3643 	reg = rd32(hw, I40E_GL_MDET_RX);
3644 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3645 		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3646 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3647 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3648 				I40E_GL_MDET_RX_EVENT_SHIFT;
3649 		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3650 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3651 		device_printf(dev,
3652 		    "Malicious Driver Detection event %d"
3653 		    " on RX queue %d, pf number %d\n",
3654 		    event, queue, pf_num);
3655 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3656 		mdd_detected = true;
3657 	}
3658 
3659 	if (mdd_detected) {
3660 		reg = rd32(hw, I40E_PF_MDET_TX);
3661 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3662 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3663 			device_printf(dev,
3664 			    "MDD TX event is for this function!\n");
3665 			pf_mdd_detected = true;
3666 		}
3667 		reg = rd32(hw, I40E_PF_MDET_RX);
3668 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3669 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3670 			device_printf(dev,
3671 			    "MDD RX event is for this function!\n");
3672 			pf_mdd_detected = true;
3673 		}
3674 	}
3675 
3676 	/* re-enable mdd interrupt cause */
3677 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3678 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3679 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3680 	ixl_flush(hw);
3681 }
3682 
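/*
 * Note on the MDD registers used above: the global I40E_GL_MDET_TX/RX
 * registers latch which queue and function triggered the event, while the
 * per-function I40E_PF_MDET_TX/RX registers only indicate whether this PF
 * was the offender. Writing all-ones back clears the latch so the next
 * event can be captured. Illustrative decode (raw value hypothetical):
 *
 *	reg = rd32(hw, I40E_GL_MDET_TX);
 *	queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
 *	    I40E_GL_MDET_TX_QUEUE_SHIFT;
 */
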
3683 void
3684 ixl_enable_intr(struct ixl_vsi *vsi)
3685 {
3686 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
3687 	struct i40e_hw		*hw = vsi->hw;
3688 	struct ixl_queue	*que = vsi->queues;
3689 
3690 	if (pf->msix > 1) {
3691 		for (int i = 0; i < vsi->num_queues; i++, que++)
3692 			ixl_enable_queue(hw, que->me);
3693 	} else
3694 		ixl_enable_intr0(hw);
3695 }
3696 
3697 void
3698 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3699 {
3700 	struct i40e_hw		*hw = vsi->hw;
3701 	struct ixl_queue	*que = vsi->queues;
3702 
3703 	for (int i = 0; i < vsi->num_queues; i++, que++)
3704 		ixl_disable_queue(hw, que->me);
3705 }
3706 
3707 void
3708 ixl_enable_intr0(struct i40e_hw *hw)
3709 {
3710 	u32		reg;
3711 
3712 	/* Use IXL_ITR_NONE so ITR isn't updated here */
3713 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3714 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3715 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3716 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3717 }
3718 
3719 void
3720 ixl_disable_intr0(struct i40e_hw *hw)
3721 {
3722 	u32		reg;
3723 
3724 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3725 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3726 	ixl_flush(hw);
3727 }
3728 
3729 void
3730 ixl_enable_queue(struct i40e_hw *hw, int id)
3731 {
3732 	u32		reg;
3733 
3734 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3735 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3736 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3737 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3738 }
3739 
3740 void
3741 ixl_disable_queue(struct i40e_hw *hw, int id)
3742 {
3743 	u32		reg;
3744 
3745 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3746 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3747 }
3748 
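/*
 * Note on the four helpers above: the "enable" writes set INTENA (plus
 * CLEARPBA to clear any pending interrupt state), while the "disable"
 * writes simply omit INTENA. In both cases IXL_ITR_NONE is placed in the
 * ITR_INDX field so the write does not change any interrupt throttling
 * (ITR) setting as a side effect.
 */
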
3749 void
3750 ixl_update_stats_counters(struct ixl_pf *pf)
3751 {
3752 	struct i40e_hw	*hw = &pf->hw;
3753 	struct ixl_vsi	*vsi = &pf->vsi;
3754 	struct ixl_vf	*vf;
3755 
3756 	struct i40e_hw_port_stats *nsd = &pf->stats;
3757 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3758 
3759 	/* Update hw stats */
3760 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3761 			   pf->stat_offsets_loaded,
3762 			   &osd->crc_errors, &nsd->crc_errors);
3763 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3764 			   pf->stat_offsets_loaded,
3765 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3766 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3767 			   I40E_GLPRT_GORCL(hw->port),
3768 			   pf->stat_offsets_loaded,
3769 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3770 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3771 			   I40E_GLPRT_GOTCL(hw->port),
3772 			   pf->stat_offsets_loaded,
3773 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3774 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3775 			   pf->stat_offsets_loaded,
3776 			   &osd->eth.rx_discards,
3777 			   &nsd->eth.rx_discards);
3778 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3779 			   I40E_GLPRT_UPRCL(hw->port),
3780 			   pf->stat_offsets_loaded,
3781 			   &osd->eth.rx_unicast,
3782 			   &nsd->eth.rx_unicast);
3783 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3784 			   I40E_GLPRT_UPTCL(hw->port),
3785 			   pf->stat_offsets_loaded,
3786 			   &osd->eth.tx_unicast,
3787 			   &nsd->eth.tx_unicast);
3788 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3789 			   I40E_GLPRT_MPRCL(hw->port),
3790 			   pf->stat_offsets_loaded,
3791 			   &osd->eth.rx_multicast,
3792 			   &nsd->eth.rx_multicast);
3793 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3794 			   I40E_GLPRT_MPTCL(hw->port),
3795 			   pf->stat_offsets_loaded,
3796 			   &osd->eth.tx_multicast,
3797 			   &nsd->eth.tx_multicast);
3798 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3799 			   I40E_GLPRT_BPRCL(hw->port),
3800 			   pf->stat_offsets_loaded,
3801 			   &osd->eth.rx_broadcast,
3802 			   &nsd->eth.rx_broadcast);
3803 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3804 			   I40E_GLPRT_BPTCL(hw->port),
3805 			   pf->stat_offsets_loaded,
3806 			   &osd->eth.tx_broadcast,
3807 			   &nsd->eth.tx_broadcast);
3808 
3809 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3810 			   pf->stat_offsets_loaded,
3811 			   &osd->tx_dropped_link_down,
3812 			   &nsd->tx_dropped_link_down);
3813 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3814 			   pf->stat_offsets_loaded,
3815 			   &osd->mac_local_faults,
3816 			   &nsd->mac_local_faults);
3817 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3818 			   pf->stat_offsets_loaded,
3819 			   &osd->mac_remote_faults,
3820 			   &nsd->mac_remote_faults);
3821 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3822 			   pf->stat_offsets_loaded,
3823 			   &osd->rx_length_errors,
3824 			   &nsd->rx_length_errors);
3825 
3826 	/* Flow control (LFC) stats */
3827 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3828 			   pf->stat_offsets_loaded,
3829 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3830 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3831 			   pf->stat_offsets_loaded,
3832 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3833 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3834 			   pf->stat_offsets_loaded,
3835 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3836 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3837 			   pf->stat_offsets_loaded,
3838 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3839 
3840 	/* Packet size stats rx */
3841 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3842 			   I40E_GLPRT_PRC64L(hw->port),
3843 			   pf->stat_offsets_loaded,
3844 			   &osd->rx_size_64, &nsd->rx_size_64);
3845 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3846 			   I40E_GLPRT_PRC127L(hw->port),
3847 			   pf->stat_offsets_loaded,
3848 			   &osd->rx_size_127, &nsd->rx_size_127);
3849 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3850 			   I40E_GLPRT_PRC255L(hw->port),
3851 			   pf->stat_offsets_loaded,
3852 			   &osd->rx_size_255, &nsd->rx_size_255);
3853 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3854 			   I40E_GLPRT_PRC511L(hw->port),
3855 			   pf->stat_offsets_loaded,
3856 			   &osd->rx_size_511, &nsd->rx_size_511);
3857 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3858 			   I40E_GLPRT_PRC1023L(hw->port),
3859 			   pf->stat_offsets_loaded,
3860 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3861 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3862 			   I40E_GLPRT_PRC1522L(hw->port),
3863 			   pf->stat_offsets_loaded,
3864 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3865 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3866 			   I40E_GLPRT_PRC9522L(hw->port),
3867 			   pf->stat_offsets_loaded,
3868 			   &osd->rx_size_big, &nsd->rx_size_big);
3869 
3870 	/* Packet size stats tx */
3871 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3872 			   I40E_GLPRT_PTC64L(hw->port),
3873 			   pf->stat_offsets_loaded,
3874 			   &osd->tx_size_64, &nsd->tx_size_64);
3875 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3876 			   I40E_GLPRT_PTC127L(hw->port),
3877 			   pf->stat_offsets_loaded,
3878 			   &osd->tx_size_127, &nsd->tx_size_127);
3879 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3880 			   I40E_GLPRT_PTC255L(hw->port),
3881 			   pf->stat_offsets_loaded,
3882 			   &osd->tx_size_255, &nsd->tx_size_255);
3883 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3884 			   I40E_GLPRT_PTC511L(hw->port),
3885 			   pf->stat_offsets_loaded,
3886 			   &osd->tx_size_511, &nsd->tx_size_511);
3887 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3888 			   I40E_GLPRT_PTC1023L(hw->port),
3889 			   pf->stat_offsets_loaded,
3890 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3891 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3892 			   I40E_GLPRT_PTC1522L(hw->port),
3893 			   pf->stat_offsets_loaded,
3894 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3895 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3896 			   I40E_GLPRT_PTC9522L(hw->port),
3897 			   pf->stat_offsets_loaded,
3898 			   &osd->tx_size_big, &nsd->tx_size_big);
3899 
3900 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3901 			   pf->stat_offsets_loaded,
3902 			   &osd->rx_undersize, &nsd->rx_undersize);
3903 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3904 			   pf->stat_offsets_loaded,
3905 			   &osd->rx_fragments, &nsd->rx_fragments);
3906 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3907 			   pf->stat_offsets_loaded,
3908 			   &osd->rx_oversize, &nsd->rx_oversize);
3909 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3910 			   pf->stat_offsets_loaded,
3911 			   &osd->rx_jabber, &nsd->rx_jabber);
3912 	pf->stat_offsets_loaded = true;
3913 	/* End hw stats */
3914 
3915 	/* Update vsi stats */
3916 	ixl_update_vsi_stats(vsi);
3917 
3918 	for (int i = 0; i < pf->num_vfs; i++) {
3919 		vf = &pf->vfs[i];
3920 		if (vf->vf_flags & VF_FLAG_ENABLED)
3921 			ixl_update_eth_stats(&vf->vsi);
3922 	}
3923 }
3924 
3925 int
3926 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
3927 {
3928 	struct i40e_hw *hw = &pf->hw;
3929 	struct ixl_vsi *vsi = &pf->vsi;
3930 	device_t dev = pf->dev;
3931 	bool is_up = false;
3932 	int error = 0;
3933 
3934 	is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3935 
3936 	/* Teardown */
3937 	if (is_up)
3938 		ixl_stop(pf);
3939 	error = i40e_shutdown_lan_hmc(hw);
3940 	if (error)
3941 		device_printf(dev,
3942 		    "Shutdown LAN HMC failed with code %d\n", error);
3943 	ixl_disable_intr0(hw);
3944 	ixl_teardown_adminq_msix(pf);
3945 	error = i40e_shutdown_adminq(hw);
3946 	if (error)
3947 		device_printf(dev,
3948 		    "Shutdown Admin queue failed with code %d\n", error);
3949 
3950 	/* Setup */
3951 	error = i40e_init_adminq(hw);
3952 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3953 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3954 		    error);
3955 	}
3956 	error = ixl_setup_adminq_msix(pf);
3957 	if (error) {
3958 		device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
3959 		    error);
3960 	}
3961 	ixl_configure_intr0_msix(pf);
3962 	ixl_enable_intr0(hw);
3963 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3964 	    hw->func_caps.num_rx_qp, 0, 0);
3965 	if (error) {
3966 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
3967 	}
3968 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3969 	if (error) {
3970 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3971 	}
3972 	if (is_up)
3973 		ixl_init(pf);
3974 
3975 	return (0);
3976 }
3977 
3978 void
3979 ixl_handle_empr_reset(struct ixl_pf *pf)
3980 {
3981 	struct i40e_hw *hw = &pf->hw;
3982 	device_t dev = pf->dev;
3983 	int count = 0;
3984 	u32 reg;
3985 
3986 	/* Typically finishes within 3-4 seconds */
3987 	while (count++ < 100) {
3988 		reg = rd32(hw, I40E_GLGEN_RSTAT)
3989 		    & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
3990 		if (reg)
3991 			i40e_msec_delay(100);
3992 		else
3993 			break;
3994 	}
3995 	ixl_dbg(pf, IXL_DBG_INFO,
3996 	    "EMPR reset wait count: %d\n", count);
3997 
3998 	device_printf(dev, "Rebuilding driver state...\n");
3999 	ixl_rebuild_hw_structs_after_reset(pf);
4000 	device_printf(dev, "Rebuilding driver state done.\n");
4001 
4002 	atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4003 }
4004 
4005 /*
4006 ** Tasklet handler for MSIX Adminq interrupts
4007 **  - run outside interrupt context since it might sleep
4008 */
4009 void
4010 ixl_do_adminq(void *context, int pending)
4011 {
4012 	struct ixl_pf			*pf = context;
4013 	struct i40e_hw			*hw = &pf->hw;
4014 	struct i40e_arq_event_info	event;
4015 	i40e_status			ret;
4016 	device_t			dev = pf->dev;
4017 	u32				loop = 0;
4018 	u16				opcode, result;
4019 
4020 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4021 		/* Flag cleared at end of this function */
4022 		ixl_handle_empr_reset(pf);
4023 		return;
4024 	}
4025 
4026 	/* Admin Queue handling */
4027 	event.buf_len = IXL_AQ_BUF_SZ;
4028 	event.msg_buf = malloc(event.buf_len,
4029 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4030 	if (!event.msg_buf) {
4031 		device_printf(dev, "%s: Unable to allocate memory for Admin"
4032 		    " Queue event!\n", __func__);
4033 		return;
4034 	}
4035 
4036 	IXL_PF_LOCK(pf);
4037 	/* clean and process any events */
4038 	do {
4039 		ret = i40e_clean_arq_element(hw, &event, &result);
4040 		if (ret)
4041 			break;
4042 		opcode = LE16_TO_CPU(event.desc.opcode);
4043 		ixl_dbg(pf, IXL_DBG_AQ,
4044 		    "Admin Queue event: %#06x\n", opcode);
4045 		switch (opcode) {
4046 		case i40e_aqc_opc_get_link_status:
4047 			ixl_link_event(pf, &event);
4048 			break;
4049 		case i40e_aqc_opc_send_msg_to_pf:
4050 #ifdef PCI_IOV
4051 			ixl_handle_vf_msg(pf, &event);
4052 #endif
4053 			break;
4054 		case i40e_aqc_opc_event_lan_overflow:
4055 		default:
4056 			break;
4057 		}
4058 
4059 	} while (result && (loop++ < IXL_ADM_LIMIT));
4060 
4061 	free(event.msg_buf, M_DEVBUF);
4062 
4063 	/*
4064 	 * If there are still messages to process, reschedule ourselves.
4065 	 * Otherwise, re-enable our interrupt.
4066 	 */
4067 	if (result > 0)
4068 		taskqueue_enqueue(pf->tq, &pf->adminq);
4069 	else
4070 		ixl_enable_intr0(hw);
4071 
4072 	IXL_PF_UNLOCK(pf);
4073 }
4074 
4075 /**
4076  * Update VSI-specific ethernet statistics counters.
4077  **/
4078 void
4079 ixl_update_eth_stats(struct ixl_vsi *vsi)
4080 {
4081 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4082 	struct i40e_hw *hw = &pf->hw;
4083 	struct i40e_eth_stats *es;
4084 	struct i40e_eth_stats *oes;
4085 	struct i40e_hw_port_stats *nsd;
4086 	u16 stat_idx = vsi->info.stat_counter_idx;
4087 
4088 	es = &vsi->eth_stats;
4089 	oes = &vsi->eth_stats_offsets;
4090 	nsd = &pf->stats;
4091 
4092 	/* Gather up the stats that the hw collects */
4093 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4094 			   vsi->stat_offsets_loaded,
4095 			   &oes->tx_errors, &es->tx_errors);
4096 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4097 			   vsi->stat_offsets_loaded,
4098 			   &oes->rx_discards, &es->rx_discards);
4099 
4100 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4101 			   I40E_GLV_GORCL(stat_idx),
4102 			   vsi->stat_offsets_loaded,
4103 			   &oes->rx_bytes, &es->rx_bytes);
4104 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4105 			   I40E_GLV_UPRCL(stat_idx),
4106 			   vsi->stat_offsets_loaded,
4107 			   &oes->rx_unicast, &es->rx_unicast);
4108 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4109 			   I40E_GLV_MPRCL(stat_idx),
4110 			   vsi->stat_offsets_loaded,
4111 			   &oes->rx_multicast, &es->rx_multicast);
4112 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4113 			   I40E_GLV_BPRCL(stat_idx),
4114 			   vsi->stat_offsets_loaded,
4115 			   &oes->rx_broadcast, &es->rx_broadcast);
4116 
4117 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4118 			   I40E_GLV_GOTCL(stat_idx),
4119 			   vsi->stat_offsets_loaded,
4120 			   &oes->tx_bytes, &es->tx_bytes);
4121 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4122 			   I40E_GLV_UPTCL(stat_idx),
4123 			   vsi->stat_offsets_loaded,
4124 			   &oes->tx_unicast, &es->tx_unicast);
4125 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4126 			   I40E_GLV_MPTCL(stat_idx),
4127 			   vsi->stat_offsets_loaded,
4128 			   &oes->tx_multicast, &es->tx_multicast);
4129 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4130 			   I40E_GLV_BPTCL(stat_idx),
4131 			   vsi->stat_offsets_loaded,
4132 			   &oes->tx_broadcast, &es->tx_broadcast);
4133 	vsi->stat_offsets_loaded = true;
4134 }
4135 
4136 void
4137 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4138 {
4139 	struct ixl_pf		*pf;
4140 	struct ifnet		*ifp;
4141 	struct i40e_eth_stats	*es;
4142 	u64			tx_discards;
4143 
4144 	struct i40e_hw_port_stats *nsd;
4145 
4146 	pf = vsi->back;
4147 	ifp = vsi->ifp;
4148 	es = &vsi->eth_stats;
4149 	nsd = &pf->stats;
4150 
4151 	ixl_update_eth_stats(vsi);
4152 
4153 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4154 	for (int i = 0; i < vsi->num_queues; i++)
4155 		tx_discards += vsi->queues[i].txr.br->br_drops;
4156 
4157 	/* Update ifnet stats */
4158 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4159 	                   es->rx_multicast +
4160 			   es->rx_broadcast);
4161 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4162 	                   es->tx_multicast +
4163 			   es->tx_broadcast);
4164 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4165 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4166 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4167 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4168 
4169 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4170 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4171 	    nsd->rx_jabber);
4172 	IXL_SET_OERRORS(vsi, es->tx_errors);
4173 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4174 	IXL_SET_OQDROPS(vsi, tx_discards);
4175 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4176 	IXL_SET_COLLISIONS(vsi, 0);
4177 }
4178 
4179 /**
4180  * Reset all of the stats for the given pf
4181  **/
4182 void
4183 ixl_pf_reset_stats(struct ixl_pf *pf)
4184 {
4185 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4186 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4187 	pf->stat_offsets_loaded = false;
4188 }
4189 
4190 /**
4191  * Resets all stats of the given vsi
4192  **/
4193 void
4194 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4195 {
4196 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4197 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4198 	vsi->stat_offsets_loaded = false;
4199 }
4200 
4201 /**
4202  * Read and update a 48 bit stat from the hw
4203  *
4204  * Since the device stats are not reset at PFReset, they likely will not
4205  * be zeroed when the driver starts.  We'll save the first values read
4206  * and use them as offsets to be subtracted from the raw values in order
4207  * to report stats that count from zero.
4208  **/
4209 void
4210 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4211 	bool offset_loaded, u64 *offset, u64 *stat)
4212 {
4213 	u64 new_data;
4214 
4215 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4216 	new_data = rd64(hw, loreg);
4217 #else
4218 	/*
4219 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4220 	 * 10 don't support 64-bit bus reads/writes.
4221 	 */
4222 	new_data = rd32(hw, loreg);
4223 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4224 #endif
4225 
4226 	if (!offset_loaded)
4227 		*offset = new_data;
4228 	if (new_data >= *offset)
4229 		*stat = new_data - *offset;
4230 	else
4231 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4232 	*stat &= 0xFFFFFFFFFFFFULL;
4233 }
4234 
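/*
 * Worked example of the 48-bit wraparound handling above (values are
 * hypothetical). If the saved offset is 0xFFFFFFFFFFF0 and the hardware
 * counter has since wrapped to 0x10, then new_data < *offset, so:
 *
 *	*stat = (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20
 *
 * i.e. 32 units were counted across the wrap. The final mask with
 * 0xFFFFFFFFFFFF keeps the result within 48 bits.
 */
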
4235 /**
4236  * Read and update a 32 bit stat from the hw
4237  **/
4238 void
4239 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4240 	bool offset_loaded, u64 *offset, u64 *stat)
4241 {
4242 	u32 new_data;
4243 
4244 	new_data = rd32(hw, reg);
4245 	if (!offset_loaded)
4246 		*offset = new_data;
4247 	if (new_data >= *offset)
4248 		*stat = (u32)(new_data - *offset);
4249 	else
4250 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4251 }
4252 
4253 void
4254 ixl_add_device_sysctls(struct ixl_pf *pf)
4255 {
4256 	device_t dev = pf->dev;
4257 	struct i40e_hw *hw = &pf->hw;
4258 
4259 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4260 	struct sysctl_oid_list *ctx_list =
4261 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4262 
4263 	struct sysctl_oid *debug_node;
4264 	struct sysctl_oid_list *debug_list;
4265 
4266 	struct sysctl_oid *fec_node;
4267 	struct sysctl_oid_list *fec_list;
4268 
4269 	/* Set up sysctls */
4270 	SYSCTL_ADD_PROC(ctx, ctx_list,
4271 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4272 	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4273 
4274 	SYSCTL_ADD_PROC(ctx, ctx_list,
4275 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4276 	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4277 
4278 	SYSCTL_ADD_PROC(ctx, ctx_list,
4279 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4280 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
4281 
4282 	SYSCTL_ADD_PROC(ctx, ctx_list,
4283 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4284 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4285 
4286 	SYSCTL_ADD_PROC(ctx, ctx_list,
4287 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4288 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
4289 	    "Queues not allocated to a PF or VF");
4290 
4291 	SYSCTL_ADD_PROC(ctx, ctx_list,
4292 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4293 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
4294 	    "Immediately set TX ITR value for all queues");
4295 
4296 	SYSCTL_ADD_PROC(ctx, ctx_list,
4297 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4298 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
4299 	    "Immediately set RX ITR value for all queues");
4300 
4301 	SYSCTL_ADD_INT(ctx, ctx_list,
4302 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4303 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4304 
4305 	SYSCTL_ADD_INT(ctx, ctx_list,
4306 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4307 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4308 
4309 	/* Add FEC sysctls for 25G adapters */
4310 	/*
4311 	 * XXX: These settings can be changed, but that isn't supported,
4312 	 * so these are read-only for now.
4313 	 */
4314 	if (hw->device_id == I40E_DEV_ID_25G_B
4315 	    || hw->device_id == I40E_DEV_ID_25G_SFP28) {
4316 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4317 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4318 		fec_list = SYSCTL_CHILDREN(fec_node);
4319 
4320 		SYSCTL_ADD_PROC(ctx, fec_list,
4321 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
4322 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4323 
4324 		SYSCTL_ADD_PROC(ctx, fec_list,
4325 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
4326 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4327 
4328 		SYSCTL_ADD_PROC(ctx, fec_list,
4329 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
4330 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4331 
4332 		SYSCTL_ADD_PROC(ctx, fec_list,
4333 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
4334 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4335 
4336 		SYSCTL_ADD_PROC(ctx, fec_list,
4337 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
4338 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4339 	}
4340 
4341 	/* Add sysctls meant to print debug information, but don't list them
4342 	 * in "sysctl -a" output. */
4343 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4344 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4345 	debug_list = SYSCTL_CHILDREN(debug_node);
4346 
4347 	SYSCTL_ADD_UINT(ctx, debug_list,
4348 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4349 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
4350 
4351 	SYSCTL_ADD_UINT(ctx, debug_list,
4352 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4353 	    &pf->dbg_mask, 0, "Non-hared code debug message level");
4354 
4355 	SYSCTL_ADD_PROC(ctx, debug_list,
4356 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4357 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4358 
4359 	SYSCTL_ADD_PROC(ctx, debug_list,
4360 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4361 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4362 
4363 	SYSCTL_ADD_PROC(ctx, debug_list,
4364 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4365 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4366 
4367 	SYSCTL_ADD_PROC(ctx, debug_list,
4368 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4369 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4370 
4371 	SYSCTL_ADD_PROC(ctx, debug_list,
4372 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4373 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4374 
4375 	SYSCTL_ADD_PROC(ctx, debug_list,
4376 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4377 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4378 
4379 	SYSCTL_ADD_PROC(ctx, debug_list,
4380 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4381 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4382 
4383 	SYSCTL_ADD_PROC(ctx, debug_list,
4384 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4385 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4386 
4387 	SYSCTL_ADD_PROC(ctx, debug_list,
4388 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4389 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4390 
4391 	if (pf->has_i2c) {
4392 		SYSCTL_ADD_PROC(ctx, debug_list,
4393 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4394 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4395 
4396 		SYSCTL_ADD_PROC(ctx, debug_list,
4397 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4398 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4399 	}
4400 
4401 #ifdef PCI_IOV
4402 	SYSCTL_ADD_UINT(ctx, debug_list,
4403 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4404 	    0, "PF/VF Virtual Channel debug level");
4405 #endif
4406 }
4407 
4408 /*
4409  * Primarily for finding out, at runtime, how many queues can be
4410  * assigned to VFs.
4411  */
4412 static int
4413 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4414 {
4415 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4416 	int queues;
4417 
4418 	IXL_PF_LOCK(pf);
4419 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4420 	IXL_PF_UNLOCK(pf);
4421 
4422 	return sysctl_handle_int(oidp, NULL, queues, req);
4423 }
4424 
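/*
 * Usage sketch for the sysctl above (device unit and output value are
 * hypothetical):
 *
 *	# sysctl dev.ixl.0.unallocated_queues
 *	dev.ixl.0.unallocated_queues: 384
 */
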
4425 /*
4426 ** Set flow control using sysctl:
4427 ** 	0 - off
4428 **	1 - rx pause
4429 **	2 - tx pause
4430 **	3 - full
4431 */
4432 int
4433 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4434 {
4435 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4436 	struct i40e_hw *hw = &pf->hw;
4437 	device_t dev = pf->dev;
4438 	int requested_fc, error = 0;
4439 	enum i40e_status_code aq_error = 0;
4440 	u8 fc_aq_err = 0;
4441 
4442 	/* Get request */
4443 	requested_fc = pf->fc;
4444 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4445 	if ((error) || (req->newptr == NULL))
4446 		return (error);
4447 	if (requested_fc < 0 || requested_fc > 3) {
4448 		device_printf(dev,
4449 		    "Invalid fc mode; valid modes are 0 through 3\n");
4450 		return (EINVAL);
4451 	}
4452 
4453 	/* Set fc ability for port */
4454 	hw->fc.requested_mode = requested_fc;
4455 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4456 	if (aq_error) {
4457 		device_printf(dev,
4458 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4459 		    __func__, aq_error, fc_aq_err);
4460 		return (EIO);
4461 	}
4462 	pf->fc = requested_fc;
4463 
4464 	/* Get new link state */
4465 	i40e_msec_delay(250);
4466 	hw->phy.get_link_info = TRUE;
4467 	i40e_get_link_status(hw, &pf->link_up);
4468 
4469 	return (0);
4470 }
4471 
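/*
 * Usage sketch for the "fc" sysctl above (device unit hypothetical):
 *
 *	# sysctl dev.ixl.0.fc=3
 *	dev.ixl.0.fc: 0 -> 3
 *
 * A value outside 0..3 is rejected with EINVAL before any AQ command is
 * issued.
 */
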
4472 char *
4473 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4474 {
4475 	int index;
4476 
4477 	char *speeds[] = {
4478 		"Unknown",
4479 		"100 Mbps",
4480 		"1 Gbps",
4481 		"10 Gbps",
4482 		"40 Gbps",
4483 		"20 Gbps",
4484 		"25 Gbps",
4485 	};
4486 
4487 	switch (link_speed) {
4488 	case I40E_LINK_SPEED_100MB:
4489 		index = 1;
4490 		break;
4491 	case I40E_LINK_SPEED_1GB:
4492 		index = 2;
4493 		break;
4494 	case I40E_LINK_SPEED_10GB:
4495 		index = 3;
4496 		break;
4497 	case I40E_LINK_SPEED_40GB:
4498 		index = 4;
4499 		break;
4500 	case I40E_LINK_SPEED_20GB:
4501 		index = 5;
4502 		break;
4503 	case I40E_LINK_SPEED_25GB:
4504 		index = 6;
4505 		break;
4506 	case I40E_LINK_SPEED_UNKNOWN:
4507 	default:
4508 		index = 0;
4509 		break;
4510 	}
4511 
4512 	return speeds[index];
4513 }
4514 
4515 int
4516 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4517 {
4518 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4519 	struct i40e_hw *hw = &pf->hw;
4520 	int error = 0;
4521 
4522 	ixl_update_link_status(pf);
4523 
4524 	error = sysctl_handle_string(oidp,
4525 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
4526 	    8, req);
4527 	return (error);
4528 }
4529 
4530 static u8
4531 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4532 {
4533 	static u16 speedmap[6] = {
4534 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
4535 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
4536 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
4537 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
4538 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
4539 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
4540 	};
4541 	u8 retval = 0;
4542 
4543 	for (int i = 0; i < 6; i++) {
4544 		if (to_aq)
4545 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4546 		else
4547 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
4548 	}
4549 
4550 	return (retval);
4551 }
4552 
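/*
 * Worked example of the table above: each speedmap entry packs the AQ
 * speed flag into its low byte and the sysctl flag into its high byte.
 * Converting the sysctl value 0x6 (1G | 10G) with to_aq == true picks up
 * speedmap[1] and speedmap[2], returning
 * I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB; to_aq == false performs the
 * inverse mapping.
 */
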
4553 int
4554 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4555 {
4556 	struct i40e_hw *hw = &pf->hw;
4557 	device_t dev = pf->dev;
4558 	struct i40e_aq_get_phy_abilities_resp abilities;
4559 	struct i40e_aq_set_phy_config config;
4560 	enum i40e_status_code aq_error = 0;
4561 
4562 	/* Get current capability information */
4563 	aq_error = i40e_aq_get_phy_capabilities(hw,
4564 	    FALSE, FALSE, &abilities, NULL);
4565 	if (aq_error) {
4566 		device_printf(dev,
4567 		    "%s: Error getting phy capabilities %d,"
4568 		    " aq error: %d\n", __func__, aq_error,
4569 		    hw->aq.asq_last_status);
4570 		return (EIO);
4571 	}
4572 
4573 	/* Prepare new config */
4574 	bzero(&config, sizeof(config));
4575 	config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4576 	config.phy_type = abilities.phy_type;
4577 	config.phy_type_ext = abilities.phy_type_ext;
4578 	config.abilities = abilities.abilities
4579 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4580 	config.eee_capability = abilities.eee_capability;
4581 	config.eeer = abilities.eeer_val;
4582 	config.low_power_ctrl = abilities.d3_lpan;
4583 
4584 	/* Do aq command & restart link */
4585 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4586 	if (aq_error) {
4587 		device_printf(dev,
4588 		    "%s: Error setting new phy config %d,"
4589 		    " aq error: %d\n", __func__, aq_error,
4590 		    hw->aq.asq_last_status);
4591 		return (EIO);
4592 	}
4593 
4594 	return (0);
4595 }
4596 
4597 /*
4598 ** Control link advertise speed:
4599 **	Flags:
4600 **	 0x1 - advertise 100 Mb
4601 **	 0x2 - advertise 1G
4602 **	 0x4 - advertise 10G
4603 **	 0x8 - advertise 20G
4604 **	0x10 - advertise 25G
4605 **	0x20 - advertise 40G
4606 **
4607 **	Set to 0 to disable link
4608 */
4609 int
4610 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4611 {
4612 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4613 	struct i40e_hw *hw = &pf->hw;
4614 	device_t dev = pf->dev;
4615 	u8 converted_speeds;
4616 	int requested_ls = 0;
4617 	int error = 0;
4618 
4619 	/* Read in new mode */
4620 	requested_ls = pf->advertised_speed;
4621 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4622 	if ((error) || (req->newptr == NULL))
4623 		return (error);
4624 	/* Check if changing speeds is supported */
4625 	switch (hw->device_id) {
4626 	case I40E_DEV_ID_25G_B:
4627 	case I40E_DEV_ID_25G_SFP28:
4628 		device_printf(dev, "Changing advertised speeds not supported"
4629 		" on this device.\n");
4630 		return (EINVAL);
4631 	}
4632 	if (requested_ls < 0 || requested_ls > 0xff) {
		device_printf(dev,
		    "Invalid advertised speed; valid range is 0 to 0xff\n");
		return (EINVAL);
4633 	}
4634 
4635 	/* Check for valid value */
4636 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4637 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4638 		device_printf(dev, "Invalid advertised speed; "
4639 		    "valid flags are: 0x%02x\n",
4640 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4641 		return (EINVAL);
4642 	}
4643 
4644 	error = ixl_set_advertised_speeds(pf, requested_ls);
4645 	if (error)
4646 		return (error);
4647 
4648 	pf->advertised_speed = requested_ls;
4649 	ixl_update_link_status(pf);
4650 	return (0);
4651 }
4652 
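/*
 * Usage sketch (device unit hypothetical): advertise only 1G and 10G
 * using the flag values documented above:
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x6
 *
 * Flags outside pf->supported_speeds are rejected with EINVAL.
 */
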
4653 /*
4654  * Input: bitmap of enum i40e_aq_link_speed
4655  */
4656 static u64
4657 ixl_max_aq_speed_to_value(u8 link_speeds)
4658 {
4659 	if (link_speeds & I40E_LINK_SPEED_40GB)
4660 		return IF_Gbps(40);
4661 	if (link_speeds & I40E_LINK_SPEED_25GB)
4662 		return IF_Gbps(25);
4663 	if (link_speeds & I40E_LINK_SPEED_20GB)
4664 		return IF_Gbps(20);
4665 	if (link_speeds & I40E_LINK_SPEED_10GB)
4666 		return IF_Gbps(10);
4667 	if (link_speeds & I40E_LINK_SPEED_1GB)
4668 		return IF_Gbps(1);
4669 	/* 100 Mb is the minimum supported link speed */
4670 	return IF_Mbps(100);
4674 }
4675 
4676 /*
4677 ** Get the width and transaction speed of
4678 ** the bus this adapter is plugged into.
4679 */
4680 void
4681 ixl_get_bus_info(struct ixl_pf *pf)
4682 {
4683 	struct i40e_hw *hw = &pf->hw;
4684 	device_t dev = pf->dev;
4685 	u16 link;
4686 	u32 offset, num_ports;
4687 	u64 max_speed;
4688 
4689 	/* Some devices don't use PCIE */
4690 	if (hw->mac.type == I40E_MAC_X722)
4691 		return;
4692 
4693 	/* Read PCI Express Capabilities Link Status Register */
4694 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
4695 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4696 
4697 	/* Fill out hw struct with PCIE info */
4698 	i40e_set_pci_config_data(hw, link);
4699 
4700 	/* Use info to print out bandwidth messages */
4701 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4702 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4703 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4704 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4705 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4706 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4707 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4708 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4709 	    ("Unknown"));
4710 
4711 	/*
4712 	 * If adapter is in slot with maximum supported speed,
4713 	 * no warning message needs to be printed out.
4714 	 */
4715 	if (hw->bus.speed >= i40e_bus_speed_8000
4716 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
4717 		return;
4718 
4719 	num_ports = bitcount32(hw->func_caps.valid_functions);
4720 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4721 
4722 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4723 		device_printf(dev, "PCI-Express bandwidth available"
4724 		    " for this device may be insufficient for"
4725 		    " optimal performance.\n");
4726 		device_printf(dev, "Please move the device to a different"
4727 		    " PCI-e link with more lanes and/or higher"
4728 		    " transfer rate.\n");
4729 	}
4730 }
4731 
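/*
 * Worked example of the bandwidth check above (hypothetical hardware): a
 * 4-port adapter with a 10 Gb/s max link speed needs
 * 4 * 10000 = 40000 Mb/s. A Gen3 x4 slot gives roughly
 * hw->bus.speed (8000) * hw->bus.width (4) = 32000, so the warning is
 * printed; the function returns early before the check for Gen3 x8 slots,
 * which provide ample bandwidth.
 */
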
4732 static int
4733 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4734 {
4735 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4736 	struct i40e_hw	*hw = &pf->hw;
4737 	struct sbuf	*sbuf;
4738 
4739 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!sbuf) {
		device_printf(pf->dev,
		    "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}
4740 	ixl_nvm_version_str(hw, sbuf);
4741 	sbuf_finish(sbuf);
4742 	sbuf_delete(sbuf);
4743 
4744 	return (0);
4745 }
4746 
4747 void
4748 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4749 {
4750 	if ((nvma->command == I40E_NVM_READ) &&
4751 	    ((nvma->config & 0xFF) == 0xF) &&
4752 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
4753 	    (nvma->offset == 0) &&
4754 	    (nvma->data_size == 1)) {
4755 		/* "Get Driver Status" command; not logged */
4756 	} else if (nvma->command == I40E_NVM_READ) {
4757 		/* Other NVM reads are not logged */
4758 	} else {
4761 		switch (nvma->command) {
4762 		case 0xB:
4763 			device_printf(dev, "- command: I40E_NVM_READ\n");
4764 			break;
4765 		case 0xC:
4766 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
4767 			break;
4768 		default:
4769 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4770 			break;
4771 		}
4772 
4773 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4774 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4775 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4776 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4777 	}
4778 }
4779 
4780 int
4781 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4782 {
4783 	struct i40e_hw *hw = &pf->hw;
4784 	struct i40e_nvm_access *nvma;
4785 	device_t dev = pf->dev;
4786 	enum i40e_status_code status = 0;
4787 	int perrno;
4788 
4789 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
4790 
4791 	/* Sanity checks */
4792 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4793 	    ifd->ifd_data == NULL) {
4794 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4795 		    __func__);
4796 		device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
4797 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4798 		device_printf(dev, "%s: data pointer: %p\n", __func__,
4799 		    ifd->ifd_data);
4800 		return (EINVAL);
4801 	}
4802 
4803 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4804 
4805 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
4806 		ixl_print_nvm_cmd(dev, nvma);
4807 
4808 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4809 		int count = 0;
4810 		while (count++ < 100) {
4811 			i40e_msec_delay(100);
4812 			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4813 				break;
4814 		}
4815 	}
4816 
4817 	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4818 		IXL_PF_LOCK(pf);
4819 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4820 		IXL_PF_UNLOCK(pf);
4821 	} else {
4822 		perrno = -EBUSY;
4823 	}
4824 
4825 	if (status)
4826 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
4827 		    i40e_stat_str(hw, status), perrno);
4828 
4829 	/*
4830 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
4831 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
4832 	 */
4833 	if (perrno == -EPERM)
4834 		return (-EACCES);
4835 	else
4836 		return (perrno);
4837 }
4838 
4839 /*********************************************************************
4840  *
4841  *  Media Ioctl callback
4842  *
4843  *  This routine is called whenever the user queries the status of
4844  *  the interface using ifconfig.
4845  *
4846  **********************************************************************/
4847 void
4848 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4849 {
4850 	struct ixl_vsi	*vsi = ifp->if_softc;
4851 	struct ixl_pf	*pf = vsi->back;
4852 	struct i40e_hw  *hw = &pf->hw;
4853 
4854 	INIT_DEBUGOUT("ixl_media_status: begin");
4855 	IXL_PF_LOCK(pf);
4856 
4857 	hw->phy.get_link_info = TRUE;
4858 	i40e_get_link_status(hw, &pf->link_up);
4859 	ixl_update_link_status(pf);
4860 
4861 	ifmr->ifm_status = IFM_AVALID;
4862 	ifmr->ifm_active = IFM_ETHER;
4863 
4864 	if (!pf->link_up) {
4865 		IXL_PF_UNLOCK(pf);
4866 		return;
4867 	}
4868 
4869 	ifmr->ifm_status |= IFM_ACTIVE;
4870 
4871 	/* Hardware always does full-duplex */
4872 	ifmr->ifm_active |= IFM_FDX;
4873 
4874 	switch (hw->phy.link_info.phy_type) {
4875 		/* 100 M */
4876 		case I40E_PHY_TYPE_100BASE_TX:
4877 			ifmr->ifm_active |= IFM_100_TX;
4878 			break;
4879 		/* 1 G */
4880 		case I40E_PHY_TYPE_1000BASE_T:
4881 			ifmr->ifm_active |= IFM_1000_T;
4882 			break;
4883 		case I40E_PHY_TYPE_1000BASE_SX:
4884 			ifmr->ifm_active |= IFM_1000_SX;
4885 			break;
4886 		case I40E_PHY_TYPE_1000BASE_LX:
4887 			ifmr->ifm_active |= IFM_1000_LX;
4888 			break;
4889 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4890 			ifmr->ifm_active |= IFM_OTHER;
4891 			break;
4892 		/* 10 G */
4893 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4894 			ifmr->ifm_active |= IFM_10G_TWINAX;
4895 			break;
4896 		case I40E_PHY_TYPE_10GBASE_SR:
4897 			ifmr->ifm_active |= IFM_10G_SR;
4898 			break;
4899 		case I40E_PHY_TYPE_10GBASE_LR:
4900 			ifmr->ifm_active |= IFM_10G_LR;
4901 			break;
4902 		case I40E_PHY_TYPE_10GBASE_T:
4903 			ifmr->ifm_active |= IFM_10G_T;
4904 			break;
4905 		case I40E_PHY_TYPE_XAUI:
4906 		case I40E_PHY_TYPE_XFI:
4907 		case I40E_PHY_TYPE_10GBASE_AOC:
4908 			ifmr->ifm_active |= IFM_OTHER;
4909 			break;
4910 		/* 25 G */
4911 		case I40E_PHY_TYPE_25GBASE_KR:
4912 			ifmr->ifm_active |= IFM_25G_KR;
4913 			break;
4914 		case I40E_PHY_TYPE_25GBASE_CR:
4915 			ifmr->ifm_active |= IFM_25G_CR;
4916 			break;
4917 		case I40E_PHY_TYPE_25GBASE_SR:
4918 			ifmr->ifm_active |= IFM_25G_SR;
4919 			break;
4920 		case I40E_PHY_TYPE_25GBASE_LR:
			/* No matching IFM_25G_LR media type yet */
4921 			ifmr->ifm_active |= IFM_UNKNOWN;
4922 			break;
4923 		/* 40 G */
4924 		case I40E_PHY_TYPE_40GBASE_CR4:
4925 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
4926 			ifmr->ifm_active |= IFM_40G_CR4;
4927 			break;
4928 		case I40E_PHY_TYPE_40GBASE_SR4:
4929 			ifmr->ifm_active |= IFM_40G_SR4;
4930 			break;
4931 		case I40E_PHY_TYPE_40GBASE_LR4:
4932 			ifmr->ifm_active |= IFM_40G_LR4;
4933 			break;
4934 		case I40E_PHY_TYPE_XLAUI:
4935 			ifmr->ifm_active |= IFM_OTHER;
4936 			break;
4937 		case I40E_PHY_TYPE_1000BASE_KX:
4938 			ifmr->ifm_active |= IFM_1000_KX;
4939 			break;
4940 		case I40E_PHY_TYPE_SGMII:
4941 			ifmr->ifm_active |= IFM_1000_SGMII;
4942 			break;
4943 		/* ERJ: What's the difference between these? */
4944 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
4945 		case I40E_PHY_TYPE_10GBASE_CR1:
4946 			ifmr->ifm_active |= IFM_10G_CR1;
4947 			break;
4948 		case I40E_PHY_TYPE_10GBASE_KX4:
4949 			ifmr->ifm_active |= IFM_10G_KX4;
4950 			break;
4951 		case I40E_PHY_TYPE_10GBASE_KR:
4952 			ifmr->ifm_active |= IFM_10G_KR;
4953 			break;
4954 		case I40E_PHY_TYPE_SFI:
4955 			ifmr->ifm_active |= IFM_10G_SFI;
4956 			break;
4957 		/* Our single 20G media type */
4958 		case I40E_PHY_TYPE_20GBASE_KR2:
4959 			ifmr->ifm_active |= IFM_20G_KR2;
4960 			break;
4961 		case I40E_PHY_TYPE_40GBASE_KR4:
4962 			ifmr->ifm_active |= IFM_40G_KR4;
4963 			break;
4964 		case I40E_PHY_TYPE_XLPPI:
4965 		case I40E_PHY_TYPE_40GBASE_AOC:
4966 			ifmr->ifm_active |= IFM_40G_XLPPI;
4967 			break;
4968 		/* Unknown to driver */
4969 		default:
4970 			ifmr->ifm_active |= IFM_UNKNOWN;
4971 			break;
4972 	}
4973 	/* Report flow control status as well */
4974 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4975 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4976 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4977 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
4978 
4979 	IXL_PF_UNLOCK(pf);
4980 }
4981 
4982 void
4983 ixl_init(void *arg)
4984 {
4985 	struct ixl_pf *pf = arg;
4986 
4987 	IXL_PF_LOCK(pf);
4988 	ixl_init_locked(pf);
4989 	IXL_PF_UNLOCK(pf);
4990 }
4991 
4992 /*
4993  * NOTE: Fortville does not support forcing media speeds. Instead,
4994  * use the set_advertise sysctl to set the speeds Fortville
4995  * will advertise or be allowed to operate at.
4996  */
4997 int
4998 ixl_media_change(struct ifnet * ifp)
4999 {
5000 	struct ixl_vsi *vsi = ifp->if_softc;
5001 	struct ifmedia *ifm = &vsi->media;
5002 
5003 	INIT_DEBUGOUT("ixl_media_change: begin");
5004 
5005 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5006 		return (EINVAL);
5007 
5008 	if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5009 
5010 	return (ENODEV);
5011 }
5012 
5013 /*********************************************************************
5014  *  Ioctl entry point
5015  *
5016  *  ixl_ioctl is called when the user wants to configure the
5017  *  interface.
5018  *
5019  *  return 0 on success, positive on failure
5020  **********************************************************************/
5021 
5022 int
5023 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5024 {
5025 	struct ixl_vsi	*vsi = ifp->if_softc;
5026 	struct ixl_pf	*pf = vsi->back;
5027 	struct ifreq	*ifr = (struct ifreq *)data;
5028 	struct ifdrv	*ifd = (struct ifdrv *)data;
5029 #if defined(INET) || defined(INET6)
5030 	struct ifaddr *ifa = (struct ifaddr *)data;
5031 	bool		avoid_reset = FALSE;
5032 #endif
5033 	int             error = 0;
5034 
5035 	switch (command) {
5036 
5037         case SIOCSIFADDR:
5038 		IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5039 #ifdef INET
5040 		if (ifa->ifa_addr->sa_family == AF_INET)
5041 			avoid_reset = TRUE;
5042 #endif
5043 #ifdef INET6
5044 		if (ifa->ifa_addr->sa_family == AF_INET6)
5045 			avoid_reset = TRUE;
5046 #endif
5047 #if defined(INET) || defined(INET6)
5048 		/*
5049 		** Calling init results in link renegotiation,
5050 		** so we avoid doing it when possible.
5051 		*/
5052 		if (avoid_reset) {
5053 			ifp->if_flags |= IFF_UP;
5054 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5055 				ixl_init(pf);
5056 #ifdef INET
5057 			if (!(ifp->if_flags & IFF_NOARP))
5058 				arp_ifinit(ifp, ifa);
5059 #endif
5060 		} else
5061 			error = ether_ioctl(ifp, command, data);
5062 #else
		error = ether_ioctl(ifp, command, data);
5063 #endif
		break;
5064 	case SIOCSIFMTU:
5065 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5066 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
5067 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5068 			error = EINVAL;
5069 		} else {
5070 			IXL_PF_LOCK(pf);
5071 			ifp->if_mtu = ifr->ifr_mtu;
5072 			vsi->max_frame_size =
5073 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5074 			    + ETHER_VLAN_ENCAP_LEN;
5075 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5076 				ixl_init_locked(pf);
5077 			IXL_PF_UNLOCK(pf);
5078 		}
5079 		break;
5080 	case SIOCSIFFLAGS:
5081 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5082 		IXL_PF_LOCK(pf);
5083 		if (ifp->if_flags & IFF_UP) {
5084 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5085 				if ((ifp->if_flags ^ pf->if_flags) &
5086 				    (IFF_PROMISC | IFF_ALLMULTI)) {
5087 					ixl_set_promisc(vsi);
5088 				}
5089 			} else {
5090 				IXL_PF_UNLOCK(pf);
5091 				ixl_init(pf);
5092 				IXL_PF_LOCK(pf);
5093 			}
5094 		} else {
5095 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5096 				ixl_stop_locked(pf);
5097 			}
5098 		}
5099 		pf->if_flags = ifp->if_flags;
5100 		IXL_PF_UNLOCK(pf);
5101 		break;
5102 	case SIOCSDRVSPEC:
5103 	case SIOCGDRVSPEC:
5104 		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5105 		    "Info)\n");
5106 
5107 		/* NVM update command */
5108 		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5109 			error = ixl_handle_nvmupd_cmd(pf, ifd);
5110 		else
5111 			error = EINVAL;
5112 		break;
5113 	case SIOCADDMULTI:
5114 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5115 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5116 			IXL_PF_LOCK(pf);
5117 			ixl_disable_rings_intr(vsi);
5118 			ixl_add_multi(vsi);
5119 			ixl_enable_intr(vsi);
5120 			IXL_PF_UNLOCK(pf);
5121 		}
5122 		break;
5123 	case SIOCDELMULTI:
5124 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5125 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5126 			IXL_PF_LOCK(pf);
5127 			ixl_disable_rings_intr(vsi);
5128 			ixl_del_multi(vsi);
5129 			ixl_enable_intr(vsi);
5130 			IXL_PF_UNLOCK(pf);
5131 		}
5132 		break;
5133 	case SIOCSIFMEDIA:
5134 	case SIOCGIFMEDIA:
5135 	case SIOCGIFXMEDIA:
5136 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5137 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5138 		break;
5139 	case SIOCSIFCAP:
5140 	{
5141 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5142 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5143 
5144 		ixl_cap_txcsum_tso(vsi, ifp, mask);
5145 
5146 		if (mask & IFCAP_RXCSUM)
5147 			ifp->if_capenable ^= IFCAP_RXCSUM;
5148 		if (mask & IFCAP_RXCSUM_IPV6)
5149 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5150 		if (mask & IFCAP_LRO)
5151 			ifp->if_capenable ^= IFCAP_LRO;
5152 		if (mask & IFCAP_VLAN_HWTAGGING)
5153 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5154 		if (mask & IFCAP_VLAN_HWFILTER)
5155 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5156 		if (mask & IFCAP_VLAN_HWTSO)
5157 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5158 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5159 			IXL_PF_LOCK(pf);
5160 			ixl_init_locked(pf);
5161 			IXL_PF_UNLOCK(pf);
5162 		}
5163 		VLAN_CAPABILITIES(ifp);
5164 
5165 		break;
5166 	}
5167 #if __FreeBSD_version >= 1003000
5168 	case SIOCGI2C:
5169 	{
5170 		struct ifi2creq i2c;
5171 		int i;
5172 
5173 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5174 		if (!pf->has_i2c)
5175 			return (ENOTTY);
5176 
5177 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5178 		if (error != 0)
5179 			break;
5180 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5181 			error = EINVAL;
5182 			break;
5183 		}
5184 		if (i2c.len > sizeof(i2c.data)) {
5185 			error = EINVAL;
5186 			break;
5187 		}
5188 
5189 		for (i = 0; i < i2c.len; i++)
5190 			if (ixl_read_i2c_byte(pf, i2c.offset + i,
5191 			    i2c.dev_addr, &i2c.data[i]))
5192 				return (EIO);
5193 
5194 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5195 		break;
5196 	}
5197 #endif
5198 	default:
5199 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5200 		error = ether_ioctl(ifp, command, data);
5201 		break;
5202 	}
5203 
5204 	return (error);
5205 }
5206 
5207 int
5208 ixl_find_i2c_interface(struct ixl_pf *pf)
5209 {
5210 	struct i40e_hw *hw = &pf->hw;
5211 	bool i2c_en, port_matched;
5212 	u32 reg;
5213 
5214 	for (int i = 0; i < 4; i++) {
5215 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5216 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5217 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5218 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5219 		    & BIT(hw->port);
5220 		if (i2c_en && port_matched)
5221 			return (i);
5222 	}
5223 
5224 	return (-1);
5225 }
5226 
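/*
 * Sketch of the decode above (register value hypothetical): if
 * I40E_GLGEN_MDIO_I2C_SEL(2) has its I2C_SEL bit set and the bit for
 * hw->port set in its PHY_PORT_NUM field, the function returns 2,
 * identifying that MDIO/I2C interface as the one wired to this port;
 * -1 means no I2C interface matched.
 */
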
5227 static char *
5228 ixl_phy_type_string(u32 bit_pos, bool ext)
5229 {
5230 	static char * phy_types_str[32] = {
5231 		"SGMII",
5232 		"1000BASE-KX",
5233 		"10GBASE-KX4",
5234 		"10GBASE-KR",
5235 		"40GBASE-KR4",
5236 		"XAUI",
5237 		"XFI",
5238 		"SFI",
5239 		"XLAUI",
5240 		"XLPPI",
5241 		"40GBASE-CR4",
5242 		"10GBASE-CR1",
5243 		"Reserved (12)",
5244 		"Reserved (13)",
5245 		"Reserved (14)",
5246 		"Reserved (15)",
5247 		"Reserved (16)",
5248 		"100BASE-TX",
5249 		"1000BASE-T",
5250 		"10GBASE-T",
5251 		"10GBASE-SR",
5252 		"10GBASE-LR",
5253 		"10GBASE-SFP+Cu",
5254 		"10GBASE-CR1",
5255 		"40GBASE-CR4",
5256 		"40GBASE-SR4",
5257 		"40GBASE-LR4",
5258 		"1000BASE-SX",
5259 		"1000BASE-LX",
5260 		"1000BASE-T Optical",
5261 		"20GBASE-KR2",
5262 		"Reserved (31)"
5263 	};
5264 	static char * ext_phy_types_str[4] = {
5265 		"25GBASE-KR",
5266 		"25GBASE-CR",
5267 		"25GBASE-SR",
5268 		"25GBASE-LR"
5269 	};
5270 
5271 	if (ext && bit_pos > 3) return "Invalid_Ext";
5272 	if (bit_pos > 31) return "Invalid";
5273 
5274 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5275 }
5276 
5277 int
5278 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5279 {
5280 	device_t dev = pf->dev;
5281 	struct i40e_hw *hw = &pf->hw;
5282 	struct i40e_aq_desc desc;
5283 	enum i40e_status_code status;
5284 
5285 	struct i40e_aqc_get_link_status *aq_link_status =
5286 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
5287 
5288 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5289 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5290 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5291 	if (status) {
5292 		device_printf(dev,
5293 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5294 		    __func__, i40e_stat_str(hw, status),
5295 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5296 		return (EIO);
5297 	}
5298 
5299 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5300 	return (0);
5301 }
5302 
5303 static char *
5304 ixl_phy_type_string_ls(u8 val)
5305 {
5306 	if (val >= 0x1F)
5307 		return ixl_phy_type_string(val - 0x1F, true);
5308 	else
5309 		return ixl_phy_type_string(val, false);
5310 }
5311 
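/*
 * Example of the encoding handled above: in the link status response,
 * extended (25G) PHY types start at 0x1F, so a phy_type of 0x20 maps to
 * extended index 1 ("25GBASE-CR"), while 0x00 maps to "SGMII".
 */
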
5312 static int
5313 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5314 {
5315 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5316 	device_t dev = pf->dev;
5317 	struct sbuf *buf;
5318 	int error = 0;
5319 
5320 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5321 	if (!buf) {
5322 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5323 		return (ENOMEM);
5324 	}
5325 
5326 	struct i40e_aqc_get_link_status link_status;
5327 	error = ixl_aq_get_link_status(pf, &link_status);
5328 	if (error) {
5329 		sbuf_delete(buf);
5330 		return (error);
5331 	}
5332 
5333 	/* TODO: Add 25G types */
5334 	sbuf_printf(buf, "\n"
5335 	    "PHY Type : 0x%02x<%s>\n"
5336 	    "Speed    : 0x%02x\n"
5337 	    "Link info: 0x%02x\n"
5338 	    "AN info  : 0x%02x\n"
5339 	    "Ext info : 0x%02x\n"
5340 	    "Loopback : 0x%02x\n"
5341 	    "Max Frame: %d\n"
5342 	    "Config   : 0x%02x\n"
5343 	    "Power    : 0x%02x",
5344 	    link_status.phy_type,
5345 	    ixl_phy_type_string_ls(link_status.phy_type),
5346 	    link_status.link_speed,
5347 	    link_status.link_info,
5348 	    link_status.an_info,
5349 	    link_status.ext_info,
5350 	    link_status.loopback,
5351 	    link_status.max_frame_size,
5352 	    link_status.config,
5353 	    link_status.power_desc);
5354 
5355 	error = sbuf_finish(buf);
5356 	if (error)
5357 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5358 
5359 	sbuf_delete(buf);
5360 	return (error);
5361 }
5362 
5363 static int
5364 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5365 {
5366 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5367 	struct i40e_hw *hw = &pf->hw;
5368 	device_t dev = pf->dev;
5369 	enum i40e_status_code status;
5370 	struct i40e_aq_get_phy_abilities_resp abilities;
5371 	struct sbuf *buf;
5372 	int error = 0;
5373 
5374 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5375 	if (!buf) {
5376 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5377 		return (ENOMEM);
5378 	}
5379 
5380 	status = i40e_aq_get_phy_capabilities(hw,
5381 	    FALSE, FALSE, &abilities, NULL);
5382 	if (status) {
5383 		device_printf(dev,
5384 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5385 		    __func__, i40e_stat_str(hw, status),
5386 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5387 		sbuf_delete(buf);
5388 		return (EIO);
5389 	}
5390 
5391 	sbuf_printf(buf, "\n"
5392 	    "PHY Type : %08x",
5393 	    abilities.phy_type);
5394 
5395 	if (abilities.phy_type != 0) {
5396 		sbuf_printf(buf, "<");
5397 		for (int i = 0; i < 32; i++)
5398 			if (((u32)1 << i) & abilities.phy_type)
5399 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5400 		sbuf_printf(buf, ">\n");
5401 	}
5402 
5403 	sbuf_printf(buf, "PHY Ext  : %02x",
5404 	    abilities.phy_type_ext);
5405 
5406 	if (abilities.phy_type_ext != 0) {
5407 		sbuf_printf(buf, "<");
5408 		for (int i = 0; i < 4; i++)
5409 			if ((1 << i) & abilities.phy_type_ext)
5410 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5411 		sbuf_printf(buf, ">");
5412 	}
5413 	sbuf_printf(buf, "\n");
5414 
5415 	sbuf_printf(buf,
5416 	    "Speed    : %02x\n"
5417 	    "Abilities: %02x\n"
5418 	    "EEE cap  : %04x\n"
5419 	    "EEER reg : %08x\n"
5420 	    "D3 Lpan  : %02x\n"
5421 	    "ID       : %02x %02x %02x %02x\n"
5422 	    "ModType  : %02x %02x %02x\n"
5423 	    "ModType E: %01x\n"
5424 	    "FEC Cfg  : %02x\n"
5425 	    "Ext CC   : %02x",
5426 	    abilities.link_speed,
5427 	    abilities.abilities, abilities.eee_capability,
5428 	    abilities.eeer_val, abilities.d3_lpan,
5429 	    abilities.phy_id[0], abilities.phy_id[1],
5430 	    abilities.phy_id[2], abilities.phy_id[3],
5431 	    abilities.module_type[0], abilities.module_type[1],
5432 	    abilities.module_type[2], abilities.phy_type_ext >> 5,
5433 	    abilities.phy_type_ext & 0x1F,
5434 	    abilities.ext_comp_code);
5435 
5436 	error = sbuf_finish(buf);
5437 	if (error)
5438 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5439 
5440 	sbuf_delete(buf);
5441 	return (error);
5442 }
5443 
5444 static int
5445 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5446 {
5447 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5448 	struct ixl_vsi *vsi = &pf->vsi;
5449 	struct ixl_mac_filter *f;
5450 	char *buf, *buf_i;
5451 
5452 	int error = 0;
5453 	int ftl_len = 0;
5454 	int ftl_counter = 0;
5455 	int buf_len = 0;
5456 	int entry_len = 42;
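	/* 42 == width of one formatted entry: 17-char MAC plus vlan/flags fields */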
5457 
5458 	SLIST_FOREACH(f, &vsi->ftl, next) {
5459 		ftl_len++;
5460 	}
5461 
5462 	if (ftl_len < 1) {
5463 		sysctl_handle_string(oidp, "(none)", 6, req);
5464 		return (0);
5465 	}
5466 
5467 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5468 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
5469 
5470 	sprintf(buf_i++, "\n");
5471 	SLIST_FOREACH(f, &vsi->ftl, next) {
5472 		sprintf(buf_i,
5473 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5474 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5475 		buf_i += entry_len;
5476 		/* don't print '\n' for last entry */
5477 		if (++ftl_counter != ftl_len) {
5478 			sprintf(buf_i, "\n");
5479 			buf_i++;
5480 		}
5481 	}
5482 
5483 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5484 	if (error)
5485 		printf("sysctl error: %d\n", error);
5486 	free(buf, M_DEVBUF);
5487 	return (error);
5488 }
5489 
5490 #define IXL_SW_RES_SIZE 0x14
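/* qsort(9) comparator: orders switch resource entries by resource_type */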
5491 int
5492 ixl_res_alloc_cmp(const void *a, const void *b)
5493 {
5494 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5495 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5496 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5497 
5498 	return ((int)one->resource_type - (int)two->resource_type);
5499 }
5500 
5501 /*
5502  * Longest string length: 25 (matches the "%25s" column width used below)
5503  */
5504 char *
5505 ixl_switch_res_type_string(u8 type)
5506 {
5507 	static char * ixl_switch_res_type_strings[0x14] = {
5508 		"VEB",
5509 		"VSI",
5510 		"Perfect Match MAC address",
5511 		"S-tag",
5512 		"(Reserved)",
5513 		"Multicast hash entry",
5514 		"Unicast hash entry",
5515 		"VLAN",
5516 		"VSI List entry",
5517 		"(Reserved)",
5518 		"VLAN Statistic Pool",
5519 		"Mirror Rule",
5520 		"Queue Set",
5521 		"Inner VLAN Forward filter",
5522 		"(Reserved)",
5523 		"Inner MAC",
5524 		"IP",
5525 		"GRE/VN1 Key",
5526 		"VN2 Key",
5527 		"Tunneling Port"
5528 	};
5529 
5530 	if (type < 0x14)
5531 		return ixl_switch_res_type_strings[type];
5532 	else
5533 		return "(Reserved)";
5534 }
5535 
5536 static int
5537 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5538 {
5539 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5540 	struct i40e_hw *hw = &pf->hw;
5541 	device_t dev = pf->dev;
5542 	struct sbuf *buf;
5543 	enum i40e_status_code status;
5544 	int error = 0;
5545 
5546 	u8 num_entries;
5547 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5548 
5549 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5550 	if (!buf) {
5551 		device_printf(dev, "Could not allocate sbuf for output.\n");
5552 		return (ENOMEM);
5553 	}
5554 
5555 	bzero(resp, sizeof(resp));
5556 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5557 				resp,
5558 				IXL_SW_RES_SIZE,
5559 				NULL);
5560 	if (status) {
5561 		device_printf(dev,
5562 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5563 		    __func__, i40e_stat_str(hw, status),
5564 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5565 		sbuf_delete(buf);
5566 		return (EIO);
5567 	}
5568 
5569 	/* Sort entries by type for display */
5570 	qsort(resp, num_entries,
5571 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5572 	    &ixl_res_alloc_cmp);
5573 
5574 	sbuf_cat(buf, "\n");
5575 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5576 	sbuf_printf(buf,
5577 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5578 	    "                          | (this)     | (all) | (this) | (all)       \n");
5579 	for (int i = 0; i < num_entries; i++) {
5580 		sbuf_printf(buf,
5581 		    "%25s | %10d   %5d   %6d   %12d",
5582 		    ixl_switch_res_type_string(resp[i].resource_type),
5583 		    resp[i].guaranteed,
5584 		    resp[i].total,
5585 		    resp[i].used,
5586 		    resp[i].total_unalloced);
5587 		if (i < num_entries - 1)
5588 			sbuf_cat(buf, "\n");
5589 	}
5590 
5591 	error = sbuf_finish(buf);
5592 	if (error)
5593 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5594 
5595 	sbuf_delete(buf);
5596 	return (error);
5597 }
5598 
5599 /*
5600 ** Caller must init and delete sbuf; this function will clear and
5601 ** finish it for caller.
5602 **
5603 ** XXX: Cannot use the SEID for this, since there is no longer a
5604 ** fixed mapping between SEID and element type.
5605 */
5606 char *
5607 ixl_switch_element_string(struct sbuf *s,
5608     struct i40e_aqc_switch_config_element_resp *element)
5609 {
5610 	sbuf_clear(s);
5611 
5612 	switch (element->element_type) {
5613 	case I40E_AQ_SW_ELEM_TYPE_MAC:
5614 		sbuf_printf(s, "MAC %3d", element->element_info);
5615 		break;
5616 	case I40E_AQ_SW_ELEM_TYPE_PF:
5617 		sbuf_printf(s, "PF  %3d", element->element_info);
5618 		break;
5619 	case I40E_AQ_SW_ELEM_TYPE_VF:
5620 		sbuf_printf(s, "VF  %3d", element->element_info);
5621 		break;
5622 	case I40E_AQ_SW_ELEM_TYPE_EMP:
5623 		sbuf_cat(s, "EMP");
5624 		break;
5625 	case I40E_AQ_SW_ELEM_TYPE_BMC:
5626 		sbuf_cat(s, "BMC");
5627 		break;
5628 	case I40E_AQ_SW_ELEM_TYPE_PV:
5629 		sbuf_cat(s, "PV");
5630 		break;
5631 	case I40E_AQ_SW_ELEM_TYPE_VEB:
5632 		sbuf_cat(s, "VEB");
5633 		break;
5634 	case I40E_AQ_SW_ELEM_TYPE_PA:
5635 		sbuf_cat(s, "PA");
5636 		break;
5637 	case I40E_AQ_SW_ELEM_TYPE_VSI:
5638 		sbuf_printf(s, "VSI %3d", element->element_info);
5639 		break;
5640 	default:
5641 		sbuf_cat(s, "?");
5642 		break;
5643 	}
5644 
5645 	sbuf_finish(s);
5646 	return sbuf_data(s);
5647 }
5648 
5649 static int
5650 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5651 {
5652 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5653 	struct i40e_hw *hw = &pf->hw;
5654 	device_t dev = pf->dev;
5655 	struct sbuf *buf;
5656 	struct sbuf *nmbuf;
5657 	enum i40e_status_code status;
5658 	int error = 0;
5659 	u16 next = 0;
5660 	u8 aq_buf[I40E_AQ_LARGE_BUF];
5661 
5662 	struct i40e_aqc_get_switch_config_resp *sw_config;
5663 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5664 
5665 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5666 	if (!buf) {
5667 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5668 		return (ENOMEM);
5669 	}
5670 
5671 	status = i40e_aq_get_switch_config(hw, sw_config,
5672 	    sizeof(aq_buf), &next, NULL);
5673 	if (status) {
5674 		device_printf(dev,
5675 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
5676 		    __func__, i40e_stat_str(hw, status),
5677 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5678 		sbuf_delete(buf);
5679 		return (error);
5680 	}
5681 	if (next)
5682 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5683 		    __func__, next);
5684 
5685 	nmbuf = sbuf_new_auto();
5686 	if (!nmbuf) {
5687 		device_printf(dev, "Could not allocate sbuf for name output.\n");
5688 		sbuf_delete(buf);
5689 		return (ENOMEM);
5690 	}
5691 
5692 	sbuf_cat(buf, "\n");
5693 	/* Assuming <= 255 elements in switch */
5694 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5695 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5696 	/* Exclude:
5697 	** Revision -- all elements are revision 1 for now
5698 	*/
5699 	sbuf_printf(buf,
5700 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5701 	    "                |          |          | (uplink)\n");
5702 	for (int i = 0; i < sw_config->header.num_reported; i++) {
5703 		/* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
5704 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5705 		sbuf_cat(buf, " ");
5706 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5707 		    &sw_config->element[i]));
5708 		sbuf_cat(buf, " | ");
5709 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5710 		sbuf_cat(buf, "   ");
5711 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5712 		sbuf_cat(buf, "   ");
5713 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5714 		if (i < sw_config->header.num_reported - 1)
5715 			sbuf_cat(buf, "\n");
5716 	}
5717 	sbuf_delete(nmbuf);
5718 
5719 	error = sbuf_finish(buf);
5720 	if (error)
5721 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5722 
5723 	sbuf_delete(buf);
5724 
5725 	return (error);
5726 }
5727 
5728 static int
5729 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5730 {
5731 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5732 	struct i40e_hw *hw = &pf->hw;
5733 	device_t dev = pf->dev;
5734 	struct sbuf *buf;
5735 	int error = 0;
5736 	enum i40e_status_code status;
5737 	u32 reg;
5738 
5739 	struct i40e_aqc_get_set_rss_key_data key_data;
5740 
5741 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5742 	if (!buf) {
5743 		device_printf(dev, "Could not allocate sbuf for output.\n");
5744 		return (ENOMEM);
5745 	}
5746 
5747 	sbuf_cat(buf, "\n");
5748 	if (hw->mac.type == I40E_MAC_X722) {
5749 		bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5750 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5751 		if (status)
5752 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5753 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5754 		sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
5755 	} else {
5756 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5757 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5758 			sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5759 		}
5760 	}
5761 
5762 	error = sbuf_finish(buf);
5763 	if (error)
5764 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5765 	sbuf_delete(buf);
5766 
5767 	return (error);
5768 }
5769 
5770 static int
5771 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5772 {
5773 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5774 	struct i40e_hw *hw = &pf->hw;
5775 	device_t dev = pf->dev;
5776 	struct sbuf *buf;
5777 	int error = 0;
5778 	enum i40e_status_code status;
5779 	u8 hlut[512];
5780 	u32 reg;
5781 
5782 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5783 	if (!buf) {
5784 		device_printf(dev, "Could not allocate sbuf for output.\n");
5785 		return (ENOMEM);
5786 	}
5787 
5788 	sbuf_cat(buf, "\n");
5789 	if (hw->mac.type == I40E_MAC_X722) {
5790 		bzero(hlut, sizeof(hlut));
5791 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5792 		if (status)
5793 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5794 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5795 		sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5796 	} else {
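		/* Each 32-bit HLUT register packs four 8-bit LUT entries */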
5797 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5798 			reg = rd32(hw, I40E_PFQF_HLUT(i));
5799 			sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5800 		}
5801 	}
5802 
5803 	error = sbuf_finish(buf);
5804 	if (error)
5805 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5806 	sbuf_delete(buf);
5807 
5808 	return (error);
5809 }
5810 
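/*
 * Report the 64-bit RSS hash-enable (HENA) bitmask, which the hardware
 * splits across two 32-bit registers.
 */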
5811 static int
5812 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
5813 {
5814 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5815 	struct i40e_hw *hw = &pf->hw;
5816 	u64 hena;
5817 
5818 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
5819 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
5820 
5821 	return sysctl_handle_long(oidp, NULL, hena, req);
5822 }
5823 
5824 /*
5825  * Sysctl to disable firmware's link management
5826  *
5827  * 1 - Disable link management on this port
5828  * 0 - Re-enable link management
5829  *
5830  * On normal NVMs, firmware manages link by default.
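 *
 * Example (sysctl node name assumed for illustration):
 *	sysctl dev.ixl.0.debug.disable_fw_link_management=1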
5831  */
5832 static int
5833 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
5834 {
5835 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5836 	struct i40e_hw *hw = &pf->hw;
5837 	device_t dev = pf->dev;
5838 	int requested_mode = -1;
5839 	enum i40e_status_code status = 0;
5840 	int error = 0;
5841 
5842 	/* Read in new mode */
5843 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
5844 	if ((error) || (req->newptr == NULL))
5845 		return (error);
5846 	/* Check for sane value */
5847 	if (requested_mode < 0 || requested_mode > 1) {
5848 		device_printf(dev, "Valid modes are 0 or 1\n");
5849 		return (EINVAL);
5850 	}
5851 
5852 	/* Set new mode: bit 4 of the PHY debug mask disables FW link management */
5853 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
5854 	if (status) {
5855 		device_printf(dev,
5856 		    "%s: Error setting new phy debug mode %s,"
5857 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
5858 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5859 		return (EIO);
5860 	}
5861 
5862 	return (0);
5863 }
5864 
5865 /*
5866  * Sysctl to read a byte from I2C bus.
5867  *
5868  * Input: 32-bit value:
5869  * 	bits 0-7:   device address (0xA0 or 0xA2)
5870  * 	bits 8-15:  offset (0-255)
5871  *	bits 16-31: unused
5872  * Output: 8-bit value read (printed to the kernel message buffer)
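 *
 * Example: to read offset 0x60 from the module EEPROM at address 0xA0,
 * set this sysctl to (0x60 << 8) | 0xA0 = 0x60A0.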
5873  */
5874 static int
5875 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
5876 {
5877 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5878 	device_t dev = pf->dev;
5879 	int input = -1, error = 0;
5880 
5881 	device_printf(dev, "%s: start\n", __func__);
5882 
5883 	u8 dev_addr, offset, output;
5884 
5885 	/* Read in I2C read parameters */
5886 	error = sysctl_handle_int(oidp, &input, 0, req);
5887 	if ((error) || (req->newptr == NULL))
5888 		return (error);
5889 	/* Validate device address */
5890 	dev_addr = input & 0xFF;
5891 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5892 		return (EINVAL);
5893 	}
5894 	offset = (input >> 8) & 0xFF;
5895 
5896 	error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
5897 	if (error)
5898 		return (error);
5899 
5900 	device_printf(dev, "%02X\n", output);
5901 	return (0);
5902 }
5903 
5904 /*
5905  * Sysctl to write a byte to the I2C bus.
5906  *
5907  * Input: 32-bit value:
5908  * 	bits 0-7:   device address (0xA0 or 0xA2)
5909  * 	bits 8-15:  offset (0-255)
5910  *	bits 16-23: value to write
5911  *	bits 24-31: unused
5912  * Output: 8-bit value written (echoed to the kernel message buffer)
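 *
 * Example: to write 0x5A to offset 0x7F of device 0xA2, set this sysctl
 * to (0x5A << 16) | (0x7F << 8) | 0xA2 = 0x5A7FA2.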
5913  */
5914 static int
5915 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
5916 {
5917 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5918 	device_t dev = pf->dev;
5919 	int input = -1, error = 0;
5920 
5921 	u8 dev_addr, offset, value;
5922 
5923 	/* Read in I2C write parameters */
5924 	error = sysctl_handle_int(oidp, &input, 0, req);
5925 	if ((error) || (req->newptr == NULL))
5926 		return (error);
5927 	/* Validate device address */
5928 	dev_addr = input & 0xFF;
5929 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5930 		return (EINVAL);
5931 	}
5932 	offset = (input >> 8) & 0xFF;
5933 	value = (input >> 16) & 0xFF;
5934 
5935 	error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
5936 	if (error)
5937 		return (error);
5938 
5939 	device_printf(dev, "%02X written\n", value);
5940 	return (0);
5941 }
5942 
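/*
 * In this AQ revision the FEC ability/request bits share the low bits of
 * phy_type_ext (displayed as "FEC Cfg" by ixl_sysctl_phy_abilities()).
 */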
5943 static int
5944 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5945     u8 bit_pos, int *is_set)
5946 {
5947 	device_t dev = pf->dev;
5948 	struct i40e_hw *hw = &pf->hw;
5949 	enum i40e_status_code status;
5950 
5951 	status = i40e_aq_get_phy_capabilities(hw,
5952 	    FALSE, FALSE, abilities, NULL);
5953 	if (status) {
5954 		device_printf(dev,
5955 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5956 		    __func__, i40e_stat_str(hw, status),
5957 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5958 		return (EIO);
5959 	}
5960 
5961 	*is_set = !!(abilities->phy_type_ext & bit_pos);
5962 	return (0);
5963 }
5964 
5965 static int
5966 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5967     u8 bit_pos, int set)
5968 {
5969 	device_t dev = pf->dev;
5970 	struct i40e_hw *hw = &pf->hw;
5971 	struct i40e_aq_set_phy_config config;
5972 	enum i40e_status_code status;
5973 
5974 	/* Set new PHY config */
5975 	memset(&config, 0, sizeof(config));
5976 	config.fec_config = abilities->phy_type_ext & ~(bit_pos);
5977 	if (set)
5978 		config.fec_config |= bit_pos;
5979 	if (config.fec_config != abilities->phy_type_ext) {
5980 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5981 		config.phy_type = abilities->phy_type;
5982 		config.phy_type_ext = abilities->phy_type_ext;
5983 		config.link_speed = abilities->link_speed;
5984 		config.eee_capability = abilities->eee_capability;
5985 		config.eeer = abilities->eeer_val;
5986 		config.low_power_ctrl = abilities->d3_lpan;
5987 		status = i40e_aq_set_phy_config(hw, &config, NULL);
5988 
5989 		if (status) {
5990 			device_printf(dev,
5991 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
5992 			    __func__, i40e_stat_str(hw, status),
5993 			    i40e_aq_str(hw, hw->aq.asq_last_status));
5994 			return (EIO);
5995 		}
5996 	}
5997 
5998 	return (0);
5999 }
6000 
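/*
 * The FEC sysctls below share one read-modify-write pattern: fetch the
 * current PHY abilities, report the state of the requested FEC bit, and
 * on a write apply the toggled bit via ixl_set_fec_config().
 */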
6001 static int
6002 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6003 {
6004 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6005 	int mode, error = 0;
6006 
6007 	struct i40e_aq_get_phy_abilities_resp abilities;
6008 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
6009 	if (error)
6010 		return (error);
6011 	/* Read in new mode */
6012 	error = sysctl_handle_int(oidp, &mode, 0, req);
6013 	if ((error) || (req->newptr == NULL))
6014 		return (error);
6015 
6016 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6017 }
6018 
6019 static int
6020 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6021 {
6022 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6023 	int mode, error = 0;
6024 
6025 	struct i40e_aq_get_phy_abilities_resp abilities;
6026 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
6027 	if (error)
6028 		return (error);
6029 	/* Read in new mode */
6030 	error = sysctl_handle_int(oidp, &mode, 0, req);
6031 	if ((error) || (req->newptr == NULL))
6032 		return (error);
6033 
6034 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6035 }
6036 
6037 static int
6038 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6039 {
6040 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6041 	int mode, error = 0;
6042 
6043 	struct i40e_aq_get_phy_abilities_resp abilities;
6044 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
6045 	if (error)
6046 		return (error);
6047 	/* Read in new mode */
6048 	error = sysctl_handle_int(oidp, &mode, 0, req);
6049 	if ((error) || (req->newptr == NULL))
6050 		return (error);
6051 
6052 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6053 }
6054 
6055 static int
6056 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6057 {
6058 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6059 	int mode, error = 0;
6060 
6061 	struct i40e_aq_get_phy_abilities_resp abilities;
6062 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
6063 	if (error)
6064 		return (error);
6065 	/* Read in new mode */
6066 	error = sysctl_handle_int(oidp, &mode, 0, req);
6067 	if ((error) || (req->newptr == NULL))
6068 		return (error);
6069 
6070 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6071 }
6072 
6073 static int
6074 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6075 {
6076 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6077 	int mode, error = 0;
6078 
6079 	struct i40e_aq_get_phy_abilities_resp abilities;
6080 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
6081 	if (error)
6082 		return (error);
6083 	/* Read in new mode */
6084 	error = sysctl_handle_int(oidp, &mode, 0, req);
6085 	if ((error) || (req->newptr == NULL))
6086 		return (error);
6087 
6088 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
6089 }
6090 
6091