/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

static int	ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
static u64	ixl_max_aq_speed_to_value(u8);
static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);

/* Sysctls */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
#endif

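/*
 * Category-masked debug printf: prints only when the given category bit is
 * enabled in pf->dbg_mask. Illustrative call (mask name assumed):
 *	ixl_debug_core(pf, IXL_DBG_INFO, "queue %d stalled\n", que->me);
 */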
void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
	va_list args;

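	/* Skip output unless the message's category bit is enabled in dbg_mask */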
	if (!(mask & pf->dbg_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
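	/* Unpack the OEM version fields packed into nvm.oem_ver */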
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring 	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: the stack calls it through the
 *  init entry point in the network interface structure, and the
 *  driver calls it as a hw/sw initialization routine to bring the
 *  device to a consistent state.
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	INIT_DEBUGOUT("ixl_init_locked: begin");
	IXL_PF_LOCK_ASSERT(pf);

	ixl_stop_locked(pf);

	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev,
			    "LLA address change failed!!\n");
			return;
		}
	}

	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */
	if (pf->msix > 1) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Get link info */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif

}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
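	/*
	 * On I40E_AQ_RC_ENOMEM the firmware reports the required buffer
	 * size in 'needed', so discovery is retried once with that length.
	 */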
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
	osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (osdep->i2c_intfc_num != -1)
		pf->has_i2c = true;

	return (error);
}

void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

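	/*
	 * TSO requires TX checksum offload, so the two capabilities are
	 * toggled together; the IXL_FLAGS_KEEP_TSO* flags remember a TSO
	 * setting that was forced off so it can be restored later.
	 */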
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/* For the set_advertise sysctl */
void
ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;

	/* Set initial sysctl values */
	status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
					      NULL);
	if (status) {
		/* Non-fatal error */
		device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
		     __func__, status);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %d\n", status);
			goto err_out;
		}
	}

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's setup when we get here.

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u8 set_fc_err_mask;
	int error = 0;

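	/*
	 * Minimal HW bring-up sequence: PF reset, admin queue, capability
	 * discovery, LAN HMC setup, then switch and PHY configuration.
	 */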
	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		     error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}


err_out:
	return (error);
}

/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

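	/*
	 * Deferred queue servicing: clean RX and TX; if RX work remains,
	 * requeue this task, otherwise re-enable the queue interrupt.
	 */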
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			icr0;
	bool			more_tx, more_rx;

	pf->admin_irq++;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);


#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
	}

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		++que->irqs;

		more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

		IXL_TX_LOCK(txr);
		more_tx = ixl_txeof(que);
		if (!drbr_empty(vsi->ifp, txr->br))
			more_tx = 1;
		IXL_TX_UNLOCK(txr);
	}

	ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(dev, "ECC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(dev, "PCI Exception detected!\n");
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	if (do_task)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_intr0(hw);
}

void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

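	/*
	 * IFF_PROMISC maps to unicast promiscuous mode; IFF_ALLMULTI (or
	 * more multicast addresses than filter slots) maps to multicast
	 * promiscuous mode.
	 */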
	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, TRUE);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/

void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	struct tx_ring		*txr;
	int			hung = 0;
	u32			mask;
	s32			timer, new_timer;

	IXL_PF_LOCK_ASSERT(pf);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/* Check status of the queues */
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
		I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);

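	/*
	 * Writing this mask to a queue's DYN_CTLN register triggers a
	 * software interrupt on that queue's vector so any outstanding
	 * work gets serviced.
	 */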
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		txr = &que->txr;
		timer = atomic_load_acq_32(&txr->watchdog_timer);
		if (timer > 0) {
			new_timer = timer - hz;
			if (new_timer <= 0) {
				atomic_store_rel_32(&txr->watchdog_timer, -1);
				device_printf(dev, "WARNING: queue %d "
				    "appears to be hung!\n", que->me);
				++hung;
			} else {
				/*
				 * If this fails, that means something in the TX path
				 * has updated the watchdog, so the TX path is still
				 * working and the watchdog doesn't need to count down.
				 */
				atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
				/* Any queues with outstanding work get a sw irq */
				wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
			}
		}
	}
	/* Reset when a queue shows hung */
	if (hung)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "WARNING: Resetting!\n");
	pf->watchdog_events++;
	ixl_init_locked(pf);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
		"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
		"Clause 108 RS-FEC" : "None",
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ifnet		*ifp = vsi->ifp;
	device_t		dev = pf->dev;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
			if_link_state_change(ifp, LINK_STATE_UP);
			ixl_link_up_msg(pf);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixl_stop_locked(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	/* Stop the local timer */
	callout_stop(&pf->timer);

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}

void
ixl_stop(struct ixl_pf *pf)
{
	IXL_PF_LOCK(pf);
	ixl_stop_locked(pf);
	IXL_PF_UNLOCK(pf);
}

/*********************************************************************
 *
 *  Setup the legacy or MSI interrupt resource and handler.
 *
 **********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
	device_t        dev = pf->dev;
	int 		error, rid = 0;

	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "bus_alloc_resource_any() for"
		    " legacy/msi interrupt failed\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "bus_setup_intr() for legacy/msi"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
	if (error) {
		/* non-fatal */
		device_printf(dev, "bus_describe_intr() for legacy/msi"
		    " interrupt name failed, error %d\n", error);
	}

	return (0);
}

int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int error = 0;

	/* Tasklet for Admin Queue interrupts */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
	/* VFLR Tasklet */
	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
	/* Create and start Admin Queue taskqueue */
	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	if (!pf->tq) {
		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
		return (ENOMEM);
	}
	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
		    error);
		taskqueue_free(pf->tq);
		return (error);
	}
	return (0);
}

int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;
#ifdef  RSS
	int		cpu_id = 0;
	cpuset_t	cpu_mask;
#endif

	/* Create queue tasks and start queue taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
	}

	return (0);
}

void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
	if (pf->tq) {
		taskqueue_free(pf->tq);
		pf->tq = NULL;
	}
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_free(que->tq);
			que->tq = NULL;
		}
	}
}

int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, error = 0;

	/* Admin IRQ rid is 1, vector is 0 */
	rid = 1;
	/* Get interrupt resource from bus */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!pf->res) {
		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
		    " interrupt failed [rid=%d]\n", rid);
		return (ENXIO);
	}
	/* Then associate interrupt with handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "bus_setup_intr() for Admin Queue"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
	if (error) {
		/* non-fatal */
		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
	}
	pf->admvec = 0;

	return (0);
}

/*
 * Allocate interrupt resources from bus and associate an interrupt handler
 * to those for the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
	device_t	dev = vsi->dev;
	struct 		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int 		error, rid, vector = 1;

	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (!que->res) {
			device_printf(dev, "bus_alloc_resource_any() for"
			    " Queue %d interrupt failed [rid=%d]\n",
			    que->me, rid);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
		if (error) {
			device_printf(dev, "bus_setup_intr() for Queue %d"
			    " interrupt handler failed, error %d\n",
			    que->me, error);
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			return (error);
		}
		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		if (error) {
			device_printf(dev, "bus_describe_intr() for Queue %d"
			    " interrupt name failed, error %d\n",
			    que->me, error);
		}
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		error = bus_bind_intr(dev, que->res, cpu_id);
		if (error) {
			device_printf(dev, "bus_bind_intr() for Queue %d"
			    " to CPU %d failed, error %d\n",
			    que->me, cpu_id, error);
		}
		que->msix = vector;
	}

	return (0);
}

/*
 * When used in a virtualized environment, the PCI BUSMASTER capability may
 * not be set, so explicitly set it here to allow the host to successfully
 * initialize us.
 */
void
ixl_set_busmaster(device_t dev)
{
	u16 pci_cmd_word;

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}

/*
 * rewrite the ENABLE in the MSIX control register
 * to cause the host to successfully initialize us.
 */
void
ixl_set_msix_enable(device_t dev)
{
	int msix_ctrl, rid;

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
}

/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	int auto_max_queues;
	int rid, want, vectors, queues, available;
#ifdef IXL_IW
	int iw_want, iw_vectors;

	pf->iw_msix = 0;
#endif

	/* Override by tuneable */
	if (!pf->enable_msix)
		goto no_msix;

	/* Ensure proper operation in virtualized environment */
	ixl_set_busmaster(dev);

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");
		goto no_msix;
	}

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto no_msix;
	}

	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);
	else
		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		pf->msix_mem = NULL;
		goto no_msix; /* Will go to Legacy setup */
	}

#ifdef IXL_IW
	if (ixl_enable_iwarp) {
		/* iWARP wants additional vector for CQP */
		iw_want = mp_ncpus + 1;
		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
				iw_want : available;
			vectors += iw_vectors;
		} else
			iw_vectors = 0;
	}
#endif

	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
#ifdef IXL_IW
		if (ixl_enable_iwarp)
			pf->iw_msix = iw_vectors;
#endif

		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
no_msix:
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->max_queues = 1;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");
	else {
		vectors = 0;
		device_printf(pf->dev, "Using a Legacy interrupt\n");
	}
	return (vectors);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSIX mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;
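
	/*
	 * Each queue pair shares one MSI-X vector. The hardware walks a
	 * linked list per vector: LNKLSTN points at RX queue i, whose RQCTL
	 * chains to TX queue i, whose TQCTL terminates the list
	 * (IXL_QUEUE_EOL).
	 */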
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* First queue type is RX / 0 */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for MSI single vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring 	*rxr = &que->rxr;
	struct tx_ring	*txr = &que->txr;
	u32 reg;

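	/* Single-vector mode: the admin queue and queue pair 0 share vector 0 */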
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int             rid;
	struct i40e_hw *hw = &pf->hw;
	device_t        dev = pf->dev;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;

	return (0);
}

/*
 * Teardown and release the admin queue/misc vector
 * interrupt.
 */
int
ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	int			rid, error = 0;

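	/* Map the admin vector back to its bus rid: MSI-X is vector + 1, MSI is 1, INTx is 0 */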
	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		rid = (pf->msix != 0) ? 1 : 0;

	if (pf->tag != NULL) {
		error = bus_teardown_intr(dev, pf->res, pf->tag);
		if (error) {
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");
			// return (ENXIO);
		}
		pf->tag = NULL;
	}
	if (pf->res != NULL) {
		error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
		if (error) {
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed [rid=%d]\n", rid);
			// return (ENXIO);
		}
		pf->res = NULL;
	}

	return (0);
}
1723 
1724 int
1725 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1726 {
1727 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
1728 	struct ixl_queue	*que = vsi->queues;
1729 	device_t		dev = vsi->dev;
1730 	int			rid, error = 0;
1731 
1732 	/* We may get here before stations are setup */
1733 	if ((pf->msix < 2) || (que == NULL))
1734 		return (0);
1735 
1736 	/* Release all MSIX queue resources */
1737 	for (int i = 0; i < vsi->num_queues; i++, que++) {
1738 		rid = que->msix + 1;
1739 		if (que->tag != NULL) {
1740 			error = bus_teardown_intr(dev, que->res, que->tag);
1741 			if (error) {
1742 				device_printf(dev, "bus_teardown_intr() for"
1743 				    " Queue %d interrupt failed\n",
1744 				    que->me);
1745 				// return (ENXIO);
1746 			}
1747 			que->tag = NULL;
1748 		}
1749 		if (que->res != NULL) {
1750 			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1751 			if (error) {
1752 				device_printf(dev, "bus_release_resource() for"
1753 				    " Queue %d interrupt failed [rid=%d]\n",
1754 				    que->me, rid);
1755 				// return (ENXIO);
1756 			}
1757 			que->res = NULL;
1758 		}
1759 	}
1760 
1761 	return (0);
1762 }
1763 
1764 void
1765 ixl_free_pci_resources(struct ixl_pf *pf)
1766 {
1767 	device_t		dev = pf->dev;
1768 	int			memrid;
1769 
1770 	ixl_teardown_queue_msix(&pf->vsi);
1771 	ixl_teardown_adminq_msix(pf);
1772 
1773 	if (pf->msix > 0)
1774 		pci_release_msi(dev);
1775 
1776 	memrid = PCIR_BAR(IXL_MSIX_BAR);
1777 
1778 	if (pf->msix_mem != NULL)
1779 		bus_release_resource(dev, SYS_RES_MEMORY,
1780 		    memrid, pf->msix_mem);
1781 
1782 	if (pf->pci_mem != NULL)
1783 		bus_release_resource(dev, SYS_RES_MEMORY,
1784 		    PCIR_BAR(0), pf->pci_mem);
1785 
1786 	return;
1787 }
1788 
1789 void
1790 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
1791 {
1792 	/* Display supported media types */
1793 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
1794 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1795 
1796 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
1797 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1798 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
1799 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1800 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
1801 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1802 
1803 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
1804 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
1805 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
1806 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1807 
1808 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
1809 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1810 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
1811 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1812 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
1813 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1814 
1815 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
1816 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
1817 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
1818 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
1819 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1820 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1821 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
1822 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1823 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
1824 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1825 
1826 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
1827 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1828 
1829 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
1830 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
1831 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1832 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
1833 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
1834 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
1835 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1836 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
1837 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1838 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
1839 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1840 
1841 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
1842 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1843 
1844 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1845 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1846 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
1847 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1848 
1849 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1850 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1851 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1852 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1853 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1854 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
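	/* 25G-LR has no dedicated ifmedia type here, so report it as unknown */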
1855 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1856 		ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
1857 }
1858 
1859 /*********************************************************************
1860  *
1861  *  Setup networking device structure and register an interface.
1862  *
1863  **********************************************************************/
1864 int
1865 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1866 {
1867 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
1868 	struct ifnet		*ifp;
1869 	struct i40e_hw		*hw = vsi->hw;
1870 	struct ixl_queue	*que = vsi->queues;
1871 	struct i40e_aq_get_phy_abilities_resp abilities;
1872 	enum i40e_status_code aq_error = 0;
1873 
1874 	INIT_DEBUGOUT("ixl_setup_interface: begin");
1875 
1876 	ifp = vsi->ifp = if_alloc(IFT_ETHER);
1877 	if (ifp == NULL) {
1878 		device_printf(dev, "cannot allocate ifnet structure\n");
1879 		return (-1);
1880 	}
1881 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1882 	ifp->if_mtu = ETHERMTU;
1883 	ifp->if_init = ixl_init;
1884 	ifp->if_softc = vsi;
1885 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1886 	ifp->if_ioctl = ixl_ioctl;
1887 
1888 #if __FreeBSD_version >= 1100036
1889 	if_setgetcounterfn(ifp, ixl_get_counter);
1890 #endif
1891 
1892 	ifp->if_transmit = ixl_mq_start;
1893 
1894 	ifp->if_qflush = ixl_qflush;
1895 
1896 	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1897 
1898 	vsi->max_frame_size =
1899 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1900 	    + ETHER_VLAN_ENCAP_LEN;
1901 
1902 	/* Set TSO limits */
1903 	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1904 	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1905 	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1906 
1907 	/*
1908 	 * Tell the upper layer(s) we support long frames.
1909 	 */
1910 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1911 
1912 	ifp->if_capabilities |= IFCAP_HWCSUM;
1913 	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1914 	ifp->if_capabilities |= IFCAP_TSO;
1915 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1916 	ifp->if_capabilities |= IFCAP_LRO;
1917 
1918 	/* VLAN capabilities */
1919 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1920 			     |  IFCAP_VLAN_HWTSO
1921 			     |  IFCAP_VLAN_MTU
1922 			     |  IFCAP_VLAN_HWCSUM;
1923 	ifp->if_capenable = ifp->if_capabilities;
1924 
1925 	/*
1926 	** Don't turn this on by default: if vlans are
1927 	** created on another pseudo device (e.g. lagg)
1928 	** then vlan events are not passed thru, breaking
1929 	** operation, but with HW FILTER off it works. If
1930 	** using vlans directly on the ixl driver you can
1931 	** enable this and get full hardware tag filtering.
1932 	*/
1933 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1934 
1935 	/*
1936 	 * Specify the media types supported by this adapter and register
1937 	 * callbacks to update media and link information
1938 	 */
1939 	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1940 		     ixl_media_status);
1941 
1942 	aq_error = i40e_aq_get_phy_capabilities(hw,
1943 	    FALSE, TRUE, &abilities, NULL);
1944 	/* May need delay to detect fiber correctly */
1945 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1946 		i40e_msec_delay(200);
1947 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1948 		    TRUE, &abilities, NULL);
1949 	}
1950 	if (aq_error) {
1951 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1952 			device_printf(dev, "Unknown PHY type detected!\n");
1953 		else
1954 			device_printf(dev,
1955 			    "Error getting supported media types, err %d,"
1956 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1957 		return (0);
1958 	}
1959 	pf->supported_speeds = abilities.link_speed;
1960 	ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
1961 
1962 	ixl_add_ifmedia(vsi, hw->phy.phy_types);
1963 
1964 	/* Use autoselect media by default */
1965 	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1966 	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1967 
1968 	ether_ifattach(ifp, hw->mac.addr);
1969 
1970 	return (0);
1971 }
1972 
1973 /*
1974 ** Run when the Admin Queue gets a link state change interrupt.
1975 */
1976 void
1977 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1978 {
1979 	struct i40e_hw	*hw = &pf->hw;
1980 	device_t dev = pf->dev;
1981 	struct i40e_aqc_get_link_status *status =
1982 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1983 
1984 	/* Request link status from adapter */
1985 	hw->phy.get_link_info = TRUE;
1986 	i40e_get_link_status(hw, &pf->link_up);
1987 
1988 	/* Print out message if an unqualified module is found */
1989 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1990 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1991 	    (!(status->link_info & I40E_AQ_LINK_UP)))
1992 		device_printf(dev, "Link failed because "
1993 		    "an unqualified module was detected!\n");
1994 
1995 	/* Update OS link info */
1996 	ixl_update_link_status(pf);
1997 }
1998 
1999 /*********************************************************************
2000  *
2001  *  Get Firmware Switch configuration
2002  *	- this will need to be more robust when more complex
2003  *	  switch configurations are enabled.
2004  *
2005  **********************************************************************/
2006 int
2007 ixl_switch_config(struct ixl_pf *pf)
2008 {
2009 	struct i40e_hw	*hw = &pf->hw;
2010 	struct ixl_vsi	*vsi = &pf->vsi;
2011 	device_t 	dev = vsi->dev;
2012 	struct i40e_aqc_get_switch_config_resp *sw_config;
2013 	u8	aq_buf[I40E_AQ_LARGE_BUF];
2014 	int	ret;
2015 	u16	next = 0;
2016 
2017 	memset(&aq_buf, 0, sizeof(aq_buf));
2018 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2019 	ret = i40e_aq_get_switch_config(hw, sw_config,
2020 	    sizeof(aq_buf), &next, NULL);
2021 	if (ret) {
2022 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
2023 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
2024 		return (ret);
2025 	}
2026 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
2027 		device_printf(dev,
2028 		    "Switch config: header reported: %d in structure, %d total\n",
2029 		    sw_config->header.num_reported, sw_config->header.num_total);
2030 		for (int i = 0; i < sw_config->header.num_reported; i++) {
2031 			device_printf(dev,
2032 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2033 			    sw_config->element[i].element_type,
2034 			    sw_config->element[i].seid,
2035 			    sw_config->element[i].uplink_seid,
2036 			    sw_config->element[i].downlink_seid);
2037 		}
2038 	}
2039 	/* Simplified due to a single VSI */
2040 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2041 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2042 	vsi->seid = sw_config->element[0].seid;
2043 	return (ret);
2044 }
2045 
2046 /*********************************************************************
2047  *
2048  *  Initialize the VSI:  this handles contexts, which means things
2049  *  			 like the number of descriptors and buffer size;
2050  *			 the rings are also initialized here.
2051  *
2052  **********************************************************************/
2053 int
2054 ixl_initialize_vsi(struct ixl_vsi *vsi)
2055 {
2056 	struct ixl_pf		*pf = vsi->back;
2057 	struct ixl_queue	*que = vsi->queues;
2058 	device_t		dev = vsi->dev;
2059 	struct i40e_hw		*hw = vsi->hw;
2060 	struct i40e_vsi_context	ctxt;
2061 	int 			tc_queues;
2062 	int			err = 0;
2063 
2064 	memset(&ctxt, 0, sizeof(ctxt));
2065 	ctxt.seid = vsi->seid;
2066 	if (pf->veb_seid != 0)
2067 		ctxt.uplink_seid = pf->veb_seid;
2068 	ctxt.pf_num = hw->pf_id;
2069 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2070 	if (err) {
2071 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
2072 		    " aq_error %d\n", err, hw->aq.asq_last_status);
2073 		return (err);
2074 	}
2075 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
2076 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2077 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2078 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2079 	    ctxt.uplink_seid, ctxt.vsi_number,
2080 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2081 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2082 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2083 	/*
2084 	** Set the queue and traffic class bits
2085 	**  - when multiple traffic classes are supported
2086 	**    this will need to be more robust.
2087 	*/
2088 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2089 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2090 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
2091 	ctxt.info.queue_mapping[0] = 0;
2092 	/*
2093 	 * This VSI will only use traffic class 0; start traffic class 0's
2094 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
2095 	 * the driver may not use all of them).
2096 	 */
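	/*
	 * Example: with 8 queues allocated to the PF, bsrl(8) == 3,
	 * so TC0 is assigned 2^3 = 8 queues starting at offset 0.
	 */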
2097 	tc_queues = bsrl(pf->qtag.num_allocated);
2098 	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2099 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2100 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2101 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
2102 
2103 	/* Set VLAN receive stripping mode */
2104 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2105 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2106 	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2107 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2108 	else
2109 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2110 
2111 #ifdef IXL_IW
2112 	/* Set TCP Enable for iWARP capable VSI */
2113 	if (ixl_enable_iwarp && pf->iw_enabled) {
2114 		ctxt.info.valid_sections |=
2115 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
2116 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
2117 	}
2118 #endif
2119 	/* Save VSI number and info for use later */
2120 	vsi->vsi_num = ctxt.vsi_number;
2121 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2122 
2123 	/* Reset VSI statistics */
2124 	ixl_vsi_reset_stats(vsi);
2125 	vsi->hw_filters_add = 0;
2126 	vsi->hw_filters_del = 0;
2127 
2128 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2129 
2130 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2131 	if (err) {
2132 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2133 		    " aq_error %d\n", err, hw->aq.asq_last_status);
2134 		return (err);
2135 	}
2136 
2137 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2138 		struct tx_ring		*txr = &que->txr;
2139 		struct rx_ring 		*rxr = &que->rxr;
2140 		struct i40e_hmc_obj_txq tctx;
2141 		struct i40e_hmc_obj_rxq rctx;
2142 		u32			txctl;
2143 		u16			size;
2144 
2145 		/* Setup the HMC TX Context  */
2146 		size = que->num_desc * sizeof(struct i40e_tx_desc);
2147 		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2148 		tctx.new_context = 1;
2149 		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2150 		tctx.qlen = que->num_desc;
2151 		tctx.fc_ena = 0;
2152 		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2153 		/* Enable HEAD writeback */
2154 		tctx.head_wb_ena = 1;
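		/* The WB slot is the extra u32 reserved past the ring in ixl_setup_queue() */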
2155 		tctx.head_wb_addr = txr->dma.pa +
2156 		    (que->num_desc * sizeof(struct i40e_tx_desc));
2157 		tctx.rdylist_act = 0;
2158 		err = i40e_clear_lan_tx_queue_context(hw, i);
2159 		if (err) {
2160 			device_printf(dev, "Unable to clear TX context\n");
2161 			break;
2162 		}
2163 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2164 		if (err) {
2165 			device_printf(dev, "Unable to set TX context\n");
2166 			break;
2167 		}
2168 		/* Associate the ring with this PF */
2169 		txctl = I40E_QTX_CTL_PF_QUEUE;
2170 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2171 		    I40E_QTX_CTL_PF_INDX_MASK);
2172 		wr32(hw, I40E_QTX_CTL(i), txctl);
2173 		ixl_flush(hw);
2174 
2175 		/* Do ring (re)init */
2176 		ixl_init_tx_ring(que);
2177 
2178 		/* Next setup the HMC RX Context  */
2179 		if (vsi->max_frame_size <= MCLBYTES)
2180 			rxr->mbuf_sz = MCLBYTES;
2181 		else
2182 			rxr->mbuf_sz = MJUMPAGESIZE;
2183 
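		/* Largest frame the HW can chain across rx_buf_chain_len buffers */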
2184 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2185 
2186 		/* Set up an RX context for the HMC */
2187 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
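		/* dbuff is in 128-byte units; e.g. 2048-byte clusters yield 16 */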
2188 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2189 		/* ignore header split for now */
2190 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2191 		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2192 		    vsi->max_frame_size : max_rxmax;
2193 		rctx.dtype = 0;
2194 		rctx.dsize = 1;	/* use 32-byte descriptors */
2195 		rctx.hsplit_0 = 0;  /* no HDR split initially */
2196 		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2197 		rctx.qlen = que->num_desc;
2198 		rctx.tphrdesc_ena = 1;
2199 		rctx.tphwdesc_ena = 1;
2200 		rctx.tphdata_ena = 0;
2201 		rctx.tphhead_ena = 0;
2202 		rctx.lrxqthresh = 2;
2203 		rctx.crcstrip = 1;
2204 		rctx.l2tsel = 1;
2205 		rctx.showiv = 1;
2206 		rctx.fc_ena = 0;
2207 		rctx.prefena = 1;
2208 
2209 		err = i40e_clear_lan_rx_queue_context(hw, i);
2210 		if (err) {
2211 			device_printf(dev,
2212 			    "Unable to clear RX context %d\n", i);
2213 			break;
2214 		}
2215 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2216 		if (err) {
2217 			device_printf(dev, "Unable to set RX context %d\n", i);
2218 			break;
2219 		}
2220 		err = ixl_init_rx_ring(que);
2221 		if (err) {
2222 			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2223 			break;
2224 		}
2225 #ifdef DEV_NETMAP
2226 		/* preserve queue */
2227 		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2228 			struct netmap_adapter *na = NA(vsi->ifp);
2229 			struct netmap_kring *kring = &na->rx_rings[i];
2230 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2231 			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2232 		} else
2233 #endif /* DEV_NETMAP */
2234 		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2235 	}
2236 	return (err);
2237 }
2238 
2239 
2240 /*********************************************************************
2241  *
2242  *  Free all VSI structs.
2243  *
2244  **********************************************************************/
2245 void
2246 ixl_free_vsi(struct ixl_vsi *vsi)
2247 {
2248 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2249 	struct ixl_queue	*que = vsi->queues;
2250 
2251 	/* Free station queues */
2252 	if (!vsi->queues)
2253 		goto free_filters;
2254 
2255 	for (int i = 0; i < vsi->num_queues; i++, que++) {
2256 		struct tx_ring *txr = &que->txr;
2257 		struct rx_ring *rxr = &que->rxr;
2258 
2259 		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2260 			continue;
2261 		IXL_TX_LOCK(txr);
2262 		ixl_free_que_tx(que);
2263 		if (txr->base)
2264 			i40e_free_dma_mem(&pf->hw, &txr->dma);
2265 		IXL_TX_UNLOCK(txr);
2266 		IXL_TX_LOCK_DESTROY(txr);
2267 
2268 		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2269 			continue;
2270 		IXL_RX_LOCK(rxr);
2271 		ixl_free_que_rx(que);
2272 		if (rxr->base)
2273 			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2274 		IXL_RX_UNLOCK(rxr);
2275 		IXL_RX_LOCK_DESTROY(rxr);
2276 	}
2277 	free(vsi->queues, M_DEVBUF);
2278 
2279 free_filters:
2280 	/* Free VSI filter list */
2281 	ixl_free_mac_filters(vsi);
2282 }
2283 
2284 void
2285 ixl_free_mac_filters(struct ixl_vsi *vsi)
2286 {
2287 	struct ixl_mac_filter *f;
2288 
2289 	while (!SLIST_EMPTY(&vsi->ftl)) {
2290 		f = SLIST_FIRST(&vsi->ftl);
2291 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2292 		free(f, M_DEVBUF);
2293 	}
2294 }
2295 
2296 /*
2297  * Fill out fields in the queue struct and set up tx/rx memory and structs
2298  */
2299 static int
2300 ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
2301 {
2302 	device_t dev = pf->dev;
2303 	struct i40e_hw *hw = &pf->hw;
2304 	struct ixl_vsi *vsi = &pf->vsi;
2305 	struct tx_ring *txr = &que->txr;
2306 	struct rx_ring *rxr = &que->rxr;
2307 	int error = 0;
2308 	int rsize, tsize;
2309 
2310 	que->num_desc = pf->ringsz;
2311 	que->me = index;
2312 	que->vsi = vsi;
2313 
2314 	txr->que = que;
2315 	txr->tail = I40E_QTX_TAIL(que->me);
2316 
2317 	/* Initialize the TX lock */
2318 	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2319 	    device_get_nameunit(dev), que->me);
2320 	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2321 	/* Create the TX descriptor ring */
2322 	tsize = roundup2((que->num_desc *
2323 	    sizeof(struct i40e_tx_desc)) +
2324 	    sizeof(u32), DBA_ALIGN);
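	/* The extra u32 holds the TX head writeback value (see ixl_initialize_vsi()) */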
2325 	if (i40e_allocate_dma_mem(hw,
2326 	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2327 		device_printf(dev,
2328 		    "Unable to allocate TX Descriptor memory\n");
2329 		error = ENOMEM;
2330 		goto fail;
2331 	}
2332 	txr->base = (struct i40e_tx_desc *)txr->dma.va;
2333 	bzero((void *)txr->base, tsize);
2334 	/* Now allocate transmit soft structs for the ring */
2335 	if (ixl_allocate_tx_data(que)) {
2336 		device_printf(dev,
2337 		    "Critical Failure setting up TX structures\n");
2338 		error = ENOMEM;
2339 		goto fail;
2340 	}
2341 	/* Allocate a buf ring */
2342 	txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2343 	    M_NOWAIT, &txr->mtx);
2344 	if (txr->br == NULL) {
2345 		device_printf(dev,
2346 		    "Critical Failure setting up TX buf ring\n");
2347 		error = ENOMEM;
2348 		goto fail;
2349 	}
2350 
2351 	rsize = roundup2(que->num_desc *
2352 	    sizeof(union i40e_rx_desc), DBA_ALIGN);
2353 	rxr->que = que;
2354 	rxr->tail = I40E_QRX_TAIL(que->me);
2355 
2356 	/* Initialize the RX side lock */
2357 	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2358 	    device_get_nameunit(dev), que->me);
2359 	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2360 
2361 	if (i40e_allocate_dma_mem(hw,
2362 	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2363 		device_printf(dev,
2364 		    "Unable to allocate RX Descriptor memory\n");
2365 		error = ENOMEM;
2366 		goto fail;
2367 	}
2368 	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2369 	bzero((void *)rxr->base, rsize);
2370 	/* Allocate receive soft structs for the ring */
2371 	if (ixl_allocate_rx_data(que)) {
2372 		device_printf(dev,
2373 		    "Critical Failure setting up receive structs\n");
2374 		error = ENOMEM;
2375 		goto fail;
2376 	}
2377 
2378 	return (0);
2379 fail:
2380 	if (rxr->base)
2381 		i40e_free_dma_mem(&pf->hw, &rxr->dma);
2382 	if (mtx_initialized(&rxr->mtx))
2383 		mtx_destroy(&rxr->mtx);
2384 	if (txr->br) {
2385 		buf_ring_free(txr->br, M_DEVBUF);
2386 		txr->br = NULL;
2387 	}
2388 	if (txr->base)
2389 		i40e_free_dma_mem(&pf->hw, &txr->dma);
2390 	if (mtx_initialized(&txr->mtx))
2391 		mtx_destroy(&txr->mtx);
2392 
2393 	return (error);
2394 }
2395 
2396 /*********************************************************************
2397  *
2398  *  Allocate memory for the VSI (virtual station interface) and its
2399  *  associated queues, rings, and the descriptors for each;
2400  *  called only once at attach.
2401  *
2402  **********************************************************************/
2403 int
2404 ixl_setup_stations(struct ixl_pf *pf)
2405 {
2406 	device_t		dev = pf->dev;
2407 	struct ixl_vsi		*vsi;
2408 	struct ixl_queue	*que;
2409 	int			error = 0;
2410 
2411 	vsi = &pf->vsi;
2412 	vsi->back = (void *)pf;
2413 	vsi->hw = &pf->hw;
2414 	vsi->id = 0;
2415 	vsi->num_vlans = 0;
2417 
2418 	/* Get memory for the station queues */
2419 	if (!(vsi->queues =
2420 	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2421 	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2422 		device_printf(dev, "Unable to allocate queue memory\n");
2423 		error = ENOMEM;
2424 		return (error);
2425 	}
2426 
2427 	/* Then setup each queue */
2428 	for (int i = 0; i < vsi->num_queues; i++) {
2429 		que = &vsi->queues[i];
2430 		error = ixl_setup_queue(que, pf, i);
2431 		if (error)
2432 			return (error);
2433 	}
2434 
2435 	return (0);
2436 }
2437 
2438 /*
2439 ** Provide an update to the queue RX
2440 ** interrupt moderation value.
2441 */
2442 void
2443 ixl_set_queue_rx_itr(struct ixl_queue *que)
2444 {
2445 	struct ixl_vsi	*vsi = que->vsi;
2446 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
2447 	struct i40e_hw	*hw = vsi->hw;
2448 	struct rx_ring	*rxr = &que->rxr;
2449 	u16		rx_itr;
2450 	u16		rx_latency = 0;
2451 	int		rx_bytes;
2452 
2453 	/* Idle, do nothing */
2454 	if (rxr->bytes == 0)
2455 		return;
2456 
2457 	if (pf->dynamic_rx_itr) {
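		/*
		 * rx_bytes approximates bytes seen per ITR interval; the
		 * 10/20 thresholds below select among the 100K, 20K, and
		 * 8K interrupt-rate buckets.
		 */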
2458 		rx_bytes = rxr->bytes/rxr->itr;
2459 		rx_itr = rxr->itr;
2460 
2461 		/* Adjust latency range */
2462 		switch (rxr->latency) {
2463 		case IXL_LOW_LATENCY:
2464 			if (rx_bytes > 10) {
2465 				rx_latency = IXL_AVE_LATENCY;
2466 				rx_itr = IXL_ITR_20K;
2467 			}
2468 			break;
2469 		case IXL_AVE_LATENCY:
2470 			if (rx_bytes > 20) {
2471 				rx_latency = IXL_BULK_LATENCY;
2472 				rx_itr = IXL_ITR_8K;
2473 			} else if (rx_bytes <= 10) {
2474 				rx_latency = IXL_LOW_LATENCY;
2475 				rx_itr = IXL_ITR_100K;
2476 			}
2477 			break;
2478 		case IXL_BULK_LATENCY:
2479 			if (rx_bytes <= 20) {
2480 				rx_latency = IXL_AVE_LATENCY;
2481 				rx_itr = IXL_ITR_20K;
2482 			}
2483 			break;
2484 		}
2485 
2486 		rxr->latency = rx_latency;
2487 
2488 		if (rx_itr != rxr->itr) {
2489 			/* do an exponential smoothing */
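			/*
			 * Weighted toward the new target; e.g. moving from
			 * itr 100 toward a target of 20 gives
			 * (10*20*100)/((9*20)+100) ~= 71 on the first pass.
			 */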
2490 			rx_itr = (10 * rx_itr * rxr->itr) /
2491 			    ((9 * rx_itr) + rxr->itr);
2492 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
2493 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2494 			    que->me), rxr->itr);
2495 		}
2496 	} else { /* We may have toggled to non-dynamic */
2497 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2498 			vsi->rx_itr_setting = pf->rx_itr;
2499 		/* Update the hardware if needed */
2500 		if (rxr->itr != vsi->rx_itr_setting) {
2501 			rxr->itr = vsi->rx_itr_setting;
2502 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2503 			    que->me), rxr->itr);
2504 		}
2505 	}
2506 	rxr->bytes = 0;
2507 	rxr->packets = 0;
2508 	return;
2509 }
2510 
2511 
2512 /*
2513 ** Provide an update to the queue TX
2514 ** interrupt moderation value.
2515 */
2516 void
2517 ixl_set_queue_tx_itr(struct ixl_queue *que)
2518 {
2519 	struct ixl_vsi	*vsi = que->vsi;
2520 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
2521 	struct i40e_hw	*hw = vsi->hw;
2522 	struct tx_ring	*txr = &que->txr;
2523 	u16		tx_itr;
2524 	u16		tx_latency = 0;
2525 	int		tx_bytes;
2526 
2527 
2528 	/* Idle, do nothing */
2529 	if (txr->bytes == 0)
2530 		return;
2531 
2532 	if (pf->dynamic_tx_itr) {
2533 		tx_bytes = txr->bytes/txr->itr;
2534 		tx_itr = txr->itr;
2535 
2536 		switch (txr->latency) {
2537 		case IXL_LOW_LATENCY:
2538 			if (tx_bytes > 10) {
2539 				tx_latency = IXL_AVE_LATENCY;
2540 				tx_itr = IXL_ITR_20K;
2541 			}
2542 			break;
2543 		case IXL_AVE_LATENCY:
2544 			if (tx_bytes > 20) {
2545 				tx_latency = IXL_BULK_LATENCY;
2546 				tx_itr = IXL_ITR_8K;
2547 			} else if (tx_bytes <= 10) {
2548 				tx_latency = IXL_LOW_LATENCY;
2549 				tx_itr = IXL_ITR_100K;
2550 			}
2551 			break;
2552 		case IXL_BULK_LATENCY:
2553 			if (tx_bytes <= 20) {
2554 				tx_latency = IXL_AVE_LATENCY;
2555 				tx_itr = IXL_ITR_20K;
2556 			}
2557 			break;
2558 		}
2559 
2560 		txr->latency = tx_latency;
2561 
2562 		if (tx_itr != txr->itr) {
2563 			/* do an exponential smoothing */
2564 			tx_itr = (10 * tx_itr * txr->itr) /
2565 			    ((9 * tx_itr) + txr->itr);
2566 			txr->itr = min(tx_itr, IXL_MAX_ITR);
2567 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2568 			    que->me), txr->itr);
2569 		}
2570 
2571 	} else { /* We may have toggled to non-dynamic */
2572 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2573 			vsi->tx_itr_setting = pf->tx_itr;
2574 		/* Update the hardware if needed */
2575 		if (txr->itr != vsi->tx_itr_setting) {
2576 			txr->itr = vsi->tx_itr_setting;
2577 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2578 			    que->me), txr->itr);
2579 		}
2580 	}
2581 	txr->bytes = 0;
2582 	txr->packets = 0;
2583 	return;
2584 }
2585 
2586 void
2587 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2588     struct sysctl_ctx_list *ctx, const char *sysctl_name)
2589 {
2590 	struct sysctl_oid *tree;
2591 	struct sysctl_oid_list *child;
2592 	struct sysctl_oid_list *vsi_list;
2593 
2594 	tree = device_get_sysctl_tree(pf->dev);
2595 	child = SYSCTL_CHILDREN(tree);
2596 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2597 				   CTLFLAG_RD, NULL, "VSI Number");
2598 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2599 
2600 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2601 }
2602 
2603 #ifdef IXL_DEBUG
2604 /**
2605  * ixl_sysctl_qtx_tail_handler
2606  * Retrieves I40E_QTX_TAIL value from hardware
2607  * for a sysctl.
2608  */
2609 static int
2610 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2611 {
2612 	struct ixl_queue *que;
2613 	int error;
2614 	u32 val;
2615 
2616 	que = ((struct ixl_queue *)oidp->oid_arg1);
2617 	if (!que) return (0);
2618 
2619 	val = rd32(que->vsi->hw, que->txr.tail);
2620 	error = sysctl_handle_int(oidp, &val, 0, req);
2621 	if (error || !req->newptr)
2622 		return (error);
2623 	return (0);
2624 }
2625 
2626 /**
2627  * ixl_sysctl_qrx_tail_handler
2628  * Retrieves I40E_QRX_TAIL value from hardware
2629  * for a sysctl.
2630  */
2631 static int
2632 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2633 {
2634 	struct ixl_queue *que;
2635 	int error;
2636 	u32 val;
2637 
2638 	que = ((struct ixl_queue *)oidp->oid_arg1);
2639 	if (!que) return (0);
2640 
2641 	val = rd32(que->vsi->hw, que->rxr.tail);
2642 	error = sysctl_handle_int(oidp, &val, 0, req);
2643 	if (error || !req->newptr)
2644 		return (error);
2645 	return (0);
2646 }
2647 #endif
2648 
2649 /*
2650  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2651  * Writes to the ITR registers immediately.
2652  */
2653 static int
2654 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2655 {
2656 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2657 	device_t dev = pf->dev;
2658 	int error = 0;
2659 	int requested_tx_itr;
2660 
2661 	requested_tx_itr = pf->tx_itr;
2662 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2663 	if ((error) || (req->newptr == NULL))
2664 		return (error);
2665 	if (pf->dynamic_tx_itr) {
2666 		device_printf(dev,
2667 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
2668 		return (EINVAL);
2669 	}
2670 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2671 		device_printf(dev,
2672 		    "Invalid TX itr value; value must be between 0 and %d\n",
2673 		    IXL_MAX_ITR);
2674 		return (EINVAL);
2675 	}
2676 
2677 	pf->tx_itr = requested_tx_itr;
2678 	ixl_configure_tx_itr(pf);
2679 
2680 	return (error);
2681 }
2682 
2683 /*
2684  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2685  * Writes to the ITR registers immediately.
2686  */
2687 static int
2688 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2689 {
2690 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2691 	device_t dev = pf->dev;
2692 	int error = 0;
2693 	int requested_rx_itr;
2694 
2695 	requested_rx_itr = pf->rx_itr;
2696 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2697 	if ((error) || (req->newptr == NULL))
2698 		return (error);
2699 	if (pf->dynamic_rx_itr) {
2700 		device_printf(dev,
2701 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
2702 		return (EINVAL);
2703 	}
2704 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2705 		device_printf(dev,
2706 		    "Invalid RX itr value; value must be between 0 and %d\n",
2707 		    IXL_MAX_ITR);
2708 		return (EINVAL);
2709 	}
2710 
2711 	pf->rx_itr = requested_rx_itr;
2712 	ixl_configure_rx_itr(pf);
2713 
2714 	return (error);
2715 }
2716 
2717 void
2718 ixl_add_hw_stats(struct ixl_pf *pf)
2719 {
2720 	device_t dev = pf->dev;
2721 	struct ixl_vsi *vsi = &pf->vsi;
2722 	struct ixl_queue *queues = vsi->queues;
2723 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2724 
2725 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2726 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2727 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2728 	struct sysctl_oid_list *vsi_list;
2729 
2730 	struct sysctl_oid *queue_node;
2731 	struct sysctl_oid_list *queue_list;
2732 
2733 	struct tx_ring *txr;
2734 	struct rx_ring *rxr;
2735 	char queue_namebuf[QUEUE_NAME_LEN];
2736 
2737 	/* Driver statistics */
2738 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2739 			CTLFLAG_RD, &pf->watchdog_events,
2740 			"Watchdog timeouts");
2741 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2742 			CTLFLAG_RD, &pf->admin_irq,
2743 			"Admin Queue IRQ Handled");
2744 
2745 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2746 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2747 
2748 	/* Queue statistics */
2749 	for (int q = 0; q < vsi->num_queues; q++) {
2750 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2751 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2752 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2753 		queue_list = SYSCTL_CHILDREN(queue_node);
2754 
2755 		txr = &(queues[q].txr);
2756 		rxr = &(queues[q].rxr);
2757 
2758 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2759 				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2760 				"m_defrag() failed");
2761 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2762 				CTLFLAG_RD, &(queues[q].irqs),
2763 				"irqs on this queue");
2764 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2765 				CTLFLAG_RD, &(queues[q].tso),
2766 				"TSO");
2767 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2768 				CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2769 				"Driver tx dma failure in xmit");
2770 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
2771 				CTLFLAG_RD, &(queues[q].mss_too_small),
2772 				"TSO sends with an MSS less than 64");
2773 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2774 				CTLFLAG_RD, &(txr->no_desc),
2775 				"Queue No Descriptor Available");
2776 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2777 				CTLFLAG_RD, &(txr->total_packets),
2778 				"Queue Packets Transmitted");
2779 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2780 				CTLFLAG_RD, &(txr->tx_bytes),
2781 				"Queue Bytes Transmitted");
2782 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2783 				CTLFLAG_RD, &(rxr->rx_packets),
2784 				"Queue Packets Received");
2785 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2786 				CTLFLAG_RD, &(rxr->rx_bytes),
2787 				"Queue Bytes Received");
2788 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2789 				CTLFLAG_RD, &(rxr->desc_errs),
2790 				"Queue Rx Descriptor Errors");
2791 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2792 				CTLFLAG_RD, &(rxr->itr), 0,
2793 				"Queue Rx ITR Interval");
2794 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2795 				CTLFLAG_RD, &(txr->itr), 0,
2796 				"Queue Tx ITR Interval");
2797 #ifdef IXL_DEBUG
2798 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2799 				CTLFLAG_RD, &(rxr->not_done),
2800 				"Queue Rx Descriptors not Done");
2801 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2802 				CTLFLAG_RD, &(rxr->next_refresh), 0,
2803 				"Queue Rx next descriptor to refresh");
2804 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2805 				CTLFLAG_RD, &(rxr->next_check), 0,
2806 				"Queue Rx next descriptor to check");
2807 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2808 				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2809 				sizeof(struct ixl_queue),
2810 				ixl_sysctl_qtx_tail_handler, "IU",
2811 				"Queue Transmit Descriptor Tail");
2812 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2813 				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2814 				sizeof(struct ixl_queue),
2815 				ixl_sysctl_qrx_tail_handler, "IU",
2816 				"Queue Receive Descriptor Tail");
2817 #endif
2818 	}
2819 
2820 	/* MAC stats */
2821 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2822 }
2823 
2824 void
2825 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2826 	struct sysctl_oid_list *child,
2827 	struct i40e_eth_stats *eth_stats)
2828 {
2829 	struct ixl_sysctl_info ctls[] =
2830 	{
2831 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2832 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2833 			"Unicast Packets Received"},
2834 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2835 			"Multicast Packets Received"},
2836 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2837 			"Broadcast Packets Received"},
2838 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2839 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2840 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2841 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
2842 			"Multicast Packets Transmitted"},
2843 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
2844 			"Broadcast Packets Transmitted"},
2845 		/* end */
2846 		{0,0,0}
2847 	};
2848 
2849 	struct ixl_sysctl_info *entry = ctls;
2850 	while (entry->stat != 0)
2851 	{
2852 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2853 				CTLFLAG_RD, entry->stat,
2854 				entry->description);
2855 		entry++;
2856 	}
2857 }
2858 
2859 void
2860 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2861 	struct sysctl_oid_list *child,
2862 	struct i40e_hw_port_stats *stats)
2863 {
2864 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2865 				    CTLFLAG_RD, NULL, "Mac Statistics");
2866 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2867 
2868 	struct i40e_eth_stats *eth_stats = &stats->eth;
2869 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2870 
2871 	struct ixl_sysctl_info ctls[] =
2872 	{
2873 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
2874 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2875 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2876 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2877 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2878 		/* Packet Reception Stats */
2879 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2880 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2881 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2882 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2883 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2884 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2885 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2886 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2887 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2888 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2889 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2890 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2891 		/* Packet Transmission Stats */
2892 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2893 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2894 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2895 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2896 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2897 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2898 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2899 		/* Flow control */
2900 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2901 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2902 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2903 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2904 		/* End */
2905 		{0,0,0}
2906 	};
2907 
2908 	struct ixl_sysctl_info *entry = ctls;
2909 	while (entry->stat != 0)
2910 	{
2911 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2912 				CTLFLAG_RD, entry->stat,
2913 				entry->description);
2914 		entry++;
2915 	}
2916 }
2917 
2918 void
2919 ixl_set_rss_key(struct ixl_pf *pf)
2920 {
2921 	struct i40e_hw *hw = &pf->hw;
2922 	struct ixl_vsi *vsi = &pf->vsi;
2923 	device_t	dev = pf->dev;
2924 	enum i40e_status_code status;
2925 #ifdef RSS
2926 	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
2927 #else
2928 	u32             rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
2929 			    0x183cfd8c, 0xce880440, 0x580cbc3c,
2930 			    0x35897377, 0x328b25e1, 0x4fa98922,
2931 			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
2932 			    0x0, 0x0, 0x0};
2933 #endif
2934 
2935 #ifdef RSS
2936 	/* Fetch the configured RSS key */
2937 	rss_getkey((uint8_t *) &rss_seed);
2938 #endif
2939 	/* Fill out hash function seed */
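	/*
	 * X722 MACs take the key via an admin queue command; other
	 * MACs are programmed through the PFQF_HKEY registers below.
	 */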
2940 	if (hw->mac.type == I40E_MAC_X722) {
2941 		struct i40e_aqc_get_set_rss_key_data key_data;
2942 		bcopy(rss_seed, key_data.standard_rss_key, 40);
2943 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2944 		if (status)
2945 			device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
2946 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2947 	} else {
2948 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2949 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2950 	}
2951 }
2952 
2953 /*
2954  * Configure enabled PCTYPES for RSS.
2955  */
2956 void
2957 ixl_set_rss_pctypes(struct ixl_pf *pf)
2958 {
2959 	struct i40e_hw *hw = &pf->hw;
2960 	u64		set_hena = 0, hena;
2961 
2962 #ifdef RSS
2963 	u32		rss_hash_config;
2964 
2965 	rss_hash_config = rss_gethashconfig();
2966 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2967 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2968 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2969 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2970 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2971 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2972 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2973 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2974 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2975 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2976 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2977 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2978 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2979 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2980 #else
2981 	if (hw->mac.type == I40E_MAC_X722)
2982 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
2983 	else
2984 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2985 #endif
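	/*
	 * HENA is a 64-bit enable mask split across two 32-bit
	 * registers; read-modify-write both halves so previously
	 * enabled PCTYPEs stay set.
	 */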
2986 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
2987 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
2988 	hena |= set_hena;
2989 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
2990 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2991 
2992 }
2993 
2994 void
2995 ixl_set_rss_hlut(struct ixl_pf *pf)
2996 {
2997 	struct i40e_hw	*hw = &pf->hw;
2998 	device_t	dev = pf->dev;
2999 	struct ixl_vsi *vsi = &pf->vsi;
3000 	int		i, que_id;
3001 	int		lut_entry_width;
3002 	u32		lut = 0;
3003 	enum i40e_status_code status;
3004 
3005 	if (hw->mac.type == I40E_MAC_X722)
3006 		lut_entry_width = 7;
3007 	else
3008 		lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
3009 
3010 	/* Populate the LUT with the max number of queues in round-robin fashion */
3011 	u8 hlut_buf[512];
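	/*
	 * Example: with 4 queues and a 512-entry table this yields
	 * 0,1,2,3,0,1,2,3,... so flows spread evenly across queues.
	 */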
3012 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
3013 #ifdef RSS
3014 		/*
3015 		 * Fetch the RSS bucket id for the given indirection entry.
3016 		 * Cap it at the number of configured buckets (which is
3017 		 * num_queues.)
3018 		 */
3019 		que_id = rss_get_indirection_to_bucket(i);
3020 		que_id = que_id % vsi->num_queues;
3021 #else
3022 		que_id = i % vsi->num_queues;
3023 #endif
3024 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
3025 		hlut_buf[i] = lut;
3026 	}
3027 
3028 	if (hw->mac.type == I40E_MAC_X722) {
3029 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
3030 		if (status)
3031 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
3032 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3033 	} else {
3034 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
3035 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
3036 		ixl_flush(hw);
3037 	}
3038 }
3039 
3040 /*
3041 ** Setup the PF's RSS parameters.
3042 */
3043 void
3044 ixl_config_rss(struct ixl_pf *pf)
3045 {
3046 	ixl_set_rss_key(pf);
3047 	ixl_set_rss_pctypes(pf);
3048 	ixl_set_rss_hlut(pf);
3049 }
3050 
3051 /*
3052 ** This routine is run via a vlan config EVENT;
3053 ** it enables us to use the HW Filter table since
3054 ** we can get the vlan id. This just creates the
3055 ** entry in the soft version of the VFTA; init will
3056 ** repopulate the real table.
3057 */
3058 void
3059 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3060 {
3061 	struct ixl_vsi	*vsi = ifp->if_softc;
3062 	struct i40e_hw	*hw = vsi->hw;
3063 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3064 
3065 	if (ifp->if_softc != arg)	/* Not our event */
3066 		return;
3067 
3068 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3069 		return;
3070 
3071 	IXL_PF_LOCK(pf);
3072 	++vsi->num_vlans;
3073 	ixl_add_filter(vsi, hw->mac.addr, vtag);
3074 	IXL_PF_UNLOCK(pf);
3075 }
3076 
3077 /*
3078 ** This routine is run via a vlan
3079 ** unconfig EVENT; it removes our entry
3080 ** from the soft vfta.
3081 */
3082 void
3083 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3084 {
3085 	struct ixl_vsi	*vsi = ifp->if_softc;
3086 	struct i40e_hw	*hw = vsi->hw;
3087 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3088 
3089 	if (ifp->if_softc != arg)
3090 		return;
3091 
3092 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3093 		return;
3094 
3095 	IXL_PF_LOCK(pf);
3096 	--vsi->num_vlans;
3097 	ixl_del_filter(vsi, hw->mac.addr, vtag);
3098 	IXL_PF_UNLOCK(pf);
3099 }
3100 
3101 /*
3102 ** This routine updates vlan filters; called by init,
3103 ** it scans the filter table and then updates the hw
3104 ** after a soft reset.
3105 */
3106 void
3107 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3108 {
3109 	struct ixl_mac_filter	*f;
3110 	int			cnt = 0, flags;
3111 
3112 	if (vsi->num_vlans == 0)
3113 		return;
3114 	/*
3115 	** Scan the filter list for vlan entries,
3116 	** mark them for addition and then call
3117 	** for the AQ update.
3118 	*/
3119 	SLIST_FOREACH(f, &vsi->ftl, next) {
3120 		if (f->flags & IXL_FILTER_VLAN) {
3121 			f->flags |=
3122 			    (IXL_FILTER_ADD |
3123 			    IXL_FILTER_USED);
3124 			cnt++;
3125 		}
3126 	}
3127 	if (cnt == 0) {
3128 		printf("setup vlan: no filters found!\n");
3129 		return;
3130 	}
3131 	flags = IXL_FILTER_VLAN;
3132 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3133 	ixl_add_hw_filters(vsi, flags, cnt);
3134 	return;
3135 }
3136 
3137 /*
3138 ** Initialize filter list and add filters that the hardware
3139 ** needs to know about.
3140 **
3141 ** Requires VSI's filter list & seid to be set before calling.
3142 */
3143 void
3144 ixl_init_filters(struct ixl_vsi *vsi)
3145 {
3146 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3147 
3148 	/* Add broadcast address */
3149 	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3150 
3151 	/*
3152 	 * Prevent Tx flow control frames from being sent out by
3153 	 * non-firmware transmitters.
3154 	 * This affects every VSI in the PF.
3155 	 */
3156 	if (pf->enable_tx_fc_filter)
3157 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3158 }
3159 
3160 /*
3161 ** This routine adds multicast filters.
3162 */
3163 void
3164 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3165 {
3166 	struct ixl_mac_filter *f;
3167 
3168 	/* Does one already exist? */
3169 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3170 	if (f != NULL)
3171 		return;
3172 
3173 	f = ixl_get_filter(vsi);
3174 	if (f == NULL) {
3175 		printf("WARNING: no filter available!!\n");
3176 		return;
3177 	}
3178 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3179 	f->vlan = IXL_VLAN_ANY;
3180 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3181 	    | IXL_FILTER_MC);
3182 
3183 	return;
3184 }
3185 
3186 void
3187 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3188 {
3189 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3190 }
3191 
3192 /*
3193 ** This routine adds macvlan filters
3194 */
3195 void
3196 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3197 {
3198 	struct ixl_mac_filter	*f, *tmp;
3199 	struct ixl_pf		*pf;
3200 	device_t		dev;
3201 
3202 	DEBUGOUT("ixl_add_filter: begin");
3203 
3204 	pf = vsi->back;
3205 	dev = pf->dev;
3206 
3207 	/* Does one already exist? */
3208 	f = ixl_find_filter(vsi, macaddr, vlan);
3209 	if (f != NULL)
3210 		return;
3211 	/*
3212 	** Is this the first vlan being registered? If so, we
3213 	** need to remove the ANY filter that indicates we are
3214 	** not in a vlan, and replace it with a 0 filter.
3215 	*/
3216 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3217 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3218 		if (tmp != NULL) {
3219 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3220 			ixl_add_filter(vsi, macaddr, 0);
3221 		}
3222 	}
3223 
3224 	f = ixl_get_filter(vsi);
3225 	if (f == NULL) {
3226 		device_printf(dev, "WARNING: no filter available!!\n");
3227 		return;
3228 	}
3229 	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3230 	f->vlan = vlan;
3231 	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3232 	if (f->vlan != IXL_VLAN_ANY)
3233 		f->flags |= IXL_FILTER_VLAN;
3234 	else
3235 		vsi->num_macs++;
3236 
3237 	ixl_add_hw_filters(vsi, f->flags, 1);
3238 	return;
3239 }
3240 
3241 void
3242 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3243 {
3244 	struct ixl_mac_filter *f;
3245 
3246 	f = ixl_find_filter(vsi, macaddr, vlan);
3247 	if (f == NULL)
3248 		return;
3249 
3250 	f->flags |= IXL_FILTER_DEL;
3251 	ixl_del_hw_filters(vsi, 1);
3252 	vsi->num_macs--;
3253 
3254 	/* Check if this is the last vlan removal */
3255 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3256 		/* Switch back to a non-vlan filter */
3257 		ixl_del_filter(vsi, macaddr, 0);
3258 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3259 	}
3260 	return;
3261 }
3262 
3263 /*
3264 ** Find the filter with both matching mac addr and vlan id
3265 */
3266 struct ixl_mac_filter *
3267 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3268 {
3269 	struct ixl_mac_filter	*f;
3270 	bool			match = FALSE;
3271 
3272 	SLIST_FOREACH(f, &vsi->ftl, next) {
3273 		if (!cmp_etheraddr(f->macaddr, macaddr))
3274 			continue;
3275 		if (f->vlan == vlan) {
3276 			match = TRUE;
3277 			break;
3278 		}
3279 	}
3280 
3281 	if (!match)
3282 		f = NULL;
3283 	return (f);
3284 }
3285 
3286 /*
3287 ** This routine takes additions to the vsi filter
3288 ** table and creates an Admin Queue call to create
3289 ** the filters in the hardware.
3290 */
3291 void
3292 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3293 {
3294 	struct i40e_aqc_add_macvlan_element_data *a, *b;
3295 	struct ixl_mac_filter	*f;
3296 	struct ixl_pf		*pf;
3297 	struct i40e_hw		*hw;
3298 	device_t		dev;
3299 	int			err, j = 0;
3300 
3301 	pf = vsi->back;
3302 	dev = pf->dev;
3303 	hw = &pf->hw;
3304 	IXL_PF_LOCK_ASSERT(pf);
3305 
3306 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3307 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3308 	if (a == NULL) {
3309 		device_printf(dev, "add_hw_filters failed to get memory\n");
3310 		return;
3311 	}
3312 
3313 	/*
3314 	** Scan the filter list, each time we find one
3315 	** we add it to the admin queue array and turn off
3316 	** the add bit.
3317 	*/
3318 	SLIST_FOREACH(f, &vsi->ftl, next) {
3319 		if (f->flags == flags) {
3320 			b = &a[j]; // a pox on fvl long names :)
3321 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3322 			if (f->vlan == IXL_VLAN_ANY) {
3323 				b->vlan_tag = 0;
3324 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3325 			} else {
3326 				b->vlan_tag = f->vlan;
3327 				b->flags = 0;
3328 			}
3329 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3330 			f->flags &= ~IXL_FILTER_ADD;
3331 			j++;
3332 		}
3333 		if (j == cnt)
3334 			break;
3335 	}
3336 	if (j > 0) {
3337 		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3338 		if (err)
3339 			device_printf(dev, "aq_add_macvlan err %d, "
3340 			    "aq_error %d\n", err, hw->aq.asq_last_status);
3341 		else
3342 			vsi->hw_filters_add += j;
3343 	}
3344 	free(a, M_DEVBUF);
3345 	return;
3346 }
3347 
3348 /*
3349 ** This routine takes removals in the vsi filter
3350 ** table and creates an Admin Queue call to delete
3351 ** the filters in the hardware.
3352 */
3353 void
3354 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3355 {
3356 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3357 	struct ixl_pf		*pf;
3358 	struct i40e_hw		*hw;
3359 	device_t		dev;
3360 	struct ixl_mac_filter	*f, *f_temp;
3361 	int			err, j = 0;
3362 
3363 	DEBUGOUT("ixl_del_hw_filters: begin\n");
3364 
3365 	pf = vsi->back;
3366 	hw = &pf->hw;
3367 	dev = pf->dev;
3368 
3369 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3370 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3371 	if (d == NULL) {
3372 		printf("del hw filter failed to get memory\n");
3373 		return;
3374 	}
3375 
3376 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3377 		if (f->flags & IXL_FILTER_DEL) {
3378 			e = &d[j]; // a pox on fvl long names :)
3379 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3380 			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3381 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3382 			/* delete entry from vsi list */
3383 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3384 			free(f, M_DEVBUF);
3385 			j++;
3386 		}
3387 		if (j == cnt)
3388 			break;
3389 	}
3390 	if (j > 0) {
3391 		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3392 		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3393 			int sc = 0;
3394 			for (int i = 0; i < j; i++)
3395 				sc += (!d[i].error_code);
3396 			vsi->hw_filters_del += sc;
3397 			device_printf(dev,
3398 			    "Failed to remove %d/%d filters, aq error %d\n",
3399 			    j - sc, j, hw->aq.asq_last_status);
3400 		} else
3401 			vsi->hw_filters_del += j;
3402 	}
3403 	free(d, M_DEVBUF);
3404 
3405 	DEBUGOUT("ixl_del_hw_filters: end\n");
3406 	return;
3407 }
3408 
3409 int
3410 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3411 {
3412 	struct i40e_hw	*hw = &pf->hw;
3413 	int		error = 0;
3414 	u32		reg;
3415 	u16		pf_qidx;
3416 
3417 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3418 
3419 	ixl_dbg(pf, IXL_DBG_EN_DIS,
3420 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3421 	    pf_qidx, vsi_qidx);
3422 
3423 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3424 
3425 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3426 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3427 	    I40E_QTX_ENA_QENA_STAT_MASK;
3428 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3429 	/* Verify the enable took */
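	/* Poll up to ~100ms (10 x 10ms) for QENA_STAT to latch */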
3430 	for (int j = 0; j < 10; j++) {
3431 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3432 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3433 			break;
3434 		i40e_msec_delay(10);
3435 	}
3436 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3437 		device_printf(pf->dev, "TX queue %d still disabled!\n",
3438 		    pf_qidx);
3439 		error = ETIMEDOUT;
3440 	}
3441 
3442 	return (error);
3443 }
3444 
3445 int
3446 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3447 {
3448 	struct i40e_hw	*hw = &pf->hw;
3449 	int		error = 0;
3450 	u32		reg;
3451 	u16		pf_qidx;
3452 
3453 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3454 
3455 	ixl_dbg(pf, IXL_DBG_EN_DIS,
3456 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3457 	    pf_qidx, vsi_qidx);
3458 
3459 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3460 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3461 	    I40E_QRX_ENA_QENA_STAT_MASK;
3462 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3463 	/* Verify the enable took */
3464 	for (int j = 0; j < 10; j++) {
3465 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3466 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3467 			break;
3468 		i40e_msec_delay(10);
3469 	}
3470 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3471 		device_printf(pf->dev, "RX queue %d still disabled!\n",
3472 		    pf_qidx);
3473 		error = ETIMEDOUT;
3474 	}
3475 
3476 	return (error);
3477 }
3478 
3479 int
3480 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3481 {
3482 	int error = 0;
3483 
3484 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3485 	/* Called function already prints error message */
3486 	if (error)
3487 		return (error);
3488 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3489 	return (error);
3490 }
3491 
3492 /* For PF VSI only */
3493 int
3494 ixl_enable_rings(struct ixl_vsi *vsi)
3495 {
3496 	struct ixl_pf	*pf = vsi->back;
3497 	int		error = 0;
3498 
3499 	for (int i = 0; i < vsi->num_queues; i++) {
3500 		error = ixl_enable_ring(pf, &pf->qtag, i);
3501 		if (error)
3502 			return (error);
3503 	}
3504 
3505 	return (error);
3506 }
3507 
3508 int
3509 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3510 {
3511 	struct i40e_hw	*hw = &pf->hw;
3512 	int		error = 0;
3513 	u32		reg;
3514 	u16		pf_qidx;
3515 
3516 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3517 
3518 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3519 	i40e_usec_delay(500);
3520 
3521 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3522 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3523 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3524 	/* Verify the disable took */
3525 	for (int j = 0; j < 10; j++) {
3526 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3527 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3528 			break;
3529 		i40e_msec_delay(10);
3530 	}
3531 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3532 		device_printf(pf->dev, "TX queue %d still enabled!\n",
3533 		    pf_qidx);
3534 		error = ETIMEDOUT;
3535 	}
3536 
3537 	return (error);
3538 }
3539 
3540 int
3541 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3542 {
3543 	struct i40e_hw	*hw = &pf->hw;
3544 	int		error = 0;
3545 	u32		reg;
3546 	u16		pf_qidx;
3547 
3548 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3549 
3550 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3551 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3552 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3553 	/* Verify the disable took */
3554 	for (int j = 0; j < 10; j++) {
3555 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3556 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3557 			break;
3558 		i40e_msec_delay(10);
3559 	}
3560 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3561 		device_printf(pf->dev, "RX queue %d still enabled!\n",
3562 		    pf_qidx);
3563 		error = ETIMEDOUT;
3564 	}
3565 
3566 	return (error);
3567 }
3568 
3569 int
3570 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3571 {
3572 	int error = 0;
3573 
3574 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3575 	/* Called function already prints error message */
3576 	if (error)
3577 		return (error);
3578 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3579 	return (error);
3580 }
3581 
3582 /* For PF VSI only */
3583 int
3584 ixl_disable_rings(struct ixl_vsi *vsi)
3585 {
3586 	struct ixl_pf	*pf = vsi->back;
3587 	int		error = 0;
3588 
3589 	for (int i = 0; i < vsi->num_queues; i++) {
3590 		error = ixl_disable_ring(pf, &pf->qtag, i);
3591 		if (error)
3592 			return (error);
3593 	}
3594 
3595 	return (error);
3596 }
3597 
3598 /**
3599  * ixl_handle_mdd_event
3600  *
3601  * Called from the interrupt handler to identify possibly malicious VFs
3602  * (it also detects events triggered by the PF itself)
3603  **/
3604 void
3605 ixl_handle_mdd_event(struct ixl_pf *pf)
3606 {
3607 	struct i40e_hw *hw = &pf->hw;
3608 	device_t dev = pf->dev;
3609 	bool mdd_detected = false;
3610 	bool pf_mdd_detected = false;
3611 	u32 reg;
3612 
3613 	/* find what triggered the MDD event */
3614 	reg = rd32(hw, I40E_GL_MDET_TX);
3615 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3616 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3617 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3618 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3619 				I40E_GL_MDET_TX_EVENT_SHIFT;
3620 		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3621 				I40E_GL_MDET_TX_QUEUE_SHIFT;
3622 		device_printf(dev,
3623 		    "Malicious Driver Detection event %d"
3624 		    " on TX queue %d, pf number %d\n",
3625 		    event, queue, pf_num);
3626 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3627 		mdd_detected = true;
3628 	}
3629 	reg = rd32(hw, I40E_GL_MDET_RX);
3630 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3631 		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3632 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3633 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3634 				I40E_GL_MDET_RX_EVENT_SHIFT;
3635 		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3636 				I40E_GL_MDET_RX_QUEUE_SHIFT;
3637 		device_printf(dev,
3638 		    "Malicious Driver Detection event %d"
3639 		    " on RX queue %d, pf number %d\n",
3640 		    event, queue, pf_num);
3641 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3642 		mdd_detected = true;
3643 	}
3644 
3645 	if (mdd_detected) {
3646 		reg = rd32(hw, I40E_PF_MDET_TX);
3647 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3648 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3649 			device_printf(dev,
3650 			    "MDD TX event is for this function!\n");
3651 			pf_mdd_detected = true;
3652 		}
3653 		reg = rd32(hw, I40E_PF_MDET_RX);
3654 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3655 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3656 			device_printf(dev,
3657 			    "MDD RX event is for this function!\n");
3658 			pf_mdd_detected = true;
3659 		}
3660 	}
3661 
3662 	/* re-enable mdd interrupt cause */
3663 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3664 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3665 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3666 	ixl_flush(hw);
3667 }
3668 
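/*
 * Enable interrupts for a VSI: with multiple MSI-X vectors each queue
 * has its own vector to unmask; otherwise everything shares the
 * "other causes" vector 0.
 */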
3669 void
3670 ixl_enable_intr(struct ixl_vsi *vsi)
3671 {
3672 	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
3673 	struct i40e_hw		*hw = vsi->hw;
3674 	struct ixl_queue	*que = vsi->queues;
3675 
3676 	if (pf->msix > 1) {
3677 		for (int i = 0; i < vsi->num_queues; i++, que++)
3678 			ixl_enable_queue(hw, que->me);
3679 	} else
3680 		ixl_enable_intr0(hw);
3681 }
3682 
3683 void
3684 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3685 {
3686 	struct i40e_hw		*hw = vsi->hw;
3687 	struct ixl_queue	*que = vsi->queues;
3688 
3689 	for (int i = 0; i < vsi->num_queues; i++, que++)
3690 		ixl_disable_queue(hw, que->me);
3691 }
3692 
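/*
 * Unmask the admin queue / "other causes" interrupt via PFINT_DYN_CTL0.
 * CLEARPBA acks any pending event, and the IXL_ITR_NONE index leaves
 * the interrupt throttling registers untouched.
 */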
3693 void
3694 ixl_enable_intr0(struct i40e_hw *hw)
3695 {
3696 	u32		reg;
3697 
3698 	/* Use IXL_ITR_NONE so ITR isn't updated here */
3699 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3700 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3701 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3702 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3703 }
3704 
3705 void
3706 ixl_disable_intr0(struct i40e_hw *hw)
3707 {
3708 	u32		reg;
3709 
3710 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3711 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3712 	ixl_flush(hw);
3713 }
3714 
3715 void
3716 ixl_enable_queue(struct i40e_hw *hw, int id)
3717 {
3718 	u32		reg;
3719 
3720 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3721 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3722 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3723 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3724 }
3725 
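/*
 * Mask a queue interrupt: writing PFINT_DYN_CTLN without INTENA set
 * disables the vector; only the no-update ITR index is written.
 */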
3726 void
3727 ixl_disable_queue(struct i40e_hw *hw, int id)
3728 {
3729 	u32		reg;
3730 
3731 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3732 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3733 }
3734 
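/*
 * Gather the port-level hardware counters, then refresh the PF VSI
 * stats and the stats of every enabled VF VSI.
 */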
3735 void
3736 ixl_update_stats_counters(struct ixl_pf *pf)
3737 {
3738 	struct i40e_hw	*hw = &pf->hw;
3739 	struct ixl_vsi	*vsi = &pf->vsi;
3740 	struct ixl_vf	*vf;
3741 
3742 	struct i40e_hw_port_stats *nsd = &pf->stats;
3743 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3744 
3745 	/* Update hw stats */
3746 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3747 			   pf->stat_offsets_loaded,
3748 			   &osd->crc_errors, &nsd->crc_errors);
3749 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3750 			   pf->stat_offsets_loaded,
3751 			   &osd->illegal_bytes, &nsd->illegal_bytes);
3752 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3753 			   I40E_GLPRT_GORCL(hw->port),
3754 			   pf->stat_offsets_loaded,
3755 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3756 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3757 			   I40E_GLPRT_GOTCL(hw->port),
3758 			   pf->stat_offsets_loaded,
3759 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3760 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3761 			   pf->stat_offsets_loaded,
3762 			   &osd->eth.rx_discards,
3763 			   &nsd->eth.rx_discards);
3764 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3765 			   I40E_GLPRT_UPRCL(hw->port),
3766 			   pf->stat_offsets_loaded,
3767 			   &osd->eth.rx_unicast,
3768 			   &nsd->eth.rx_unicast);
3769 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3770 			   I40E_GLPRT_UPTCL(hw->port),
3771 			   pf->stat_offsets_loaded,
3772 			   &osd->eth.tx_unicast,
3773 			   &nsd->eth.tx_unicast);
3774 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3775 			   I40E_GLPRT_MPRCL(hw->port),
3776 			   pf->stat_offsets_loaded,
3777 			   &osd->eth.rx_multicast,
3778 			   &nsd->eth.rx_multicast);
3779 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3780 			   I40E_GLPRT_MPTCL(hw->port),
3781 			   pf->stat_offsets_loaded,
3782 			   &osd->eth.tx_multicast,
3783 			   &nsd->eth.tx_multicast);
3784 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3785 			   I40E_GLPRT_BPRCL(hw->port),
3786 			   pf->stat_offsets_loaded,
3787 			   &osd->eth.rx_broadcast,
3788 			   &nsd->eth.rx_broadcast);
3789 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3790 			   I40E_GLPRT_BPTCL(hw->port),
3791 			   pf->stat_offsets_loaded,
3792 			   &osd->eth.tx_broadcast,
3793 			   &nsd->eth.tx_broadcast);
3794 
3795 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3796 			   pf->stat_offsets_loaded,
3797 			   &osd->tx_dropped_link_down,
3798 			   &nsd->tx_dropped_link_down);
3799 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3800 			   pf->stat_offsets_loaded,
3801 			   &osd->mac_local_faults,
3802 			   &nsd->mac_local_faults);
3803 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3804 			   pf->stat_offsets_loaded,
3805 			   &osd->mac_remote_faults,
3806 			   &nsd->mac_remote_faults);
3807 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3808 			   pf->stat_offsets_loaded,
3809 			   &osd->rx_length_errors,
3810 			   &nsd->rx_length_errors);
3811 
3812 	/* Flow control (LFC) stats */
3813 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3814 			   pf->stat_offsets_loaded,
3815 			   &osd->link_xon_rx, &nsd->link_xon_rx);
3816 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3817 			   pf->stat_offsets_loaded,
3818 			   &osd->link_xon_tx, &nsd->link_xon_tx);
3819 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3820 			   pf->stat_offsets_loaded,
3821 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3822 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3823 			   pf->stat_offsets_loaded,
3824 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3825 
3826 	/* Packet size stats rx */
3827 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3828 			   I40E_GLPRT_PRC64L(hw->port),
3829 			   pf->stat_offsets_loaded,
3830 			   &osd->rx_size_64, &nsd->rx_size_64);
3831 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3832 			   I40E_GLPRT_PRC127L(hw->port),
3833 			   pf->stat_offsets_loaded,
3834 			   &osd->rx_size_127, &nsd->rx_size_127);
3835 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3836 			   I40E_GLPRT_PRC255L(hw->port),
3837 			   pf->stat_offsets_loaded,
3838 			   &osd->rx_size_255, &nsd->rx_size_255);
3839 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3840 			   I40E_GLPRT_PRC511L(hw->port),
3841 			   pf->stat_offsets_loaded,
3842 			   &osd->rx_size_511, &nsd->rx_size_511);
3843 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3844 			   I40E_GLPRT_PRC1023L(hw->port),
3845 			   pf->stat_offsets_loaded,
3846 			   &osd->rx_size_1023, &nsd->rx_size_1023);
3847 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3848 			   I40E_GLPRT_PRC1522L(hw->port),
3849 			   pf->stat_offsets_loaded,
3850 			   &osd->rx_size_1522, &nsd->rx_size_1522);
3851 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3852 			   I40E_GLPRT_PRC9522L(hw->port),
3853 			   pf->stat_offsets_loaded,
3854 			   &osd->rx_size_big, &nsd->rx_size_big);
3855 
3856 	/* Packet size stats tx */
3857 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3858 			   I40E_GLPRT_PTC64L(hw->port),
3859 			   pf->stat_offsets_loaded,
3860 			   &osd->tx_size_64, &nsd->tx_size_64);
3861 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3862 			   I40E_GLPRT_PTC127L(hw->port),
3863 			   pf->stat_offsets_loaded,
3864 			   &osd->tx_size_127, &nsd->tx_size_127);
3865 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3866 			   I40E_GLPRT_PTC255L(hw->port),
3867 			   pf->stat_offsets_loaded,
3868 			   &osd->tx_size_255, &nsd->tx_size_255);
3869 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3870 			   I40E_GLPRT_PTC511L(hw->port),
3871 			   pf->stat_offsets_loaded,
3872 			   &osd->tx_size_511, &nsd->tx_size_511);
3873 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3874 			   I40E_GLPRT_PTC1023L(hw->port),
3875 			   pf->stat_offsets_loaded,
3876 			   &osd->tx_size_1023, &nsd->tx_size_1023);
3877 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3878 			   I40E_GLPRT_PTC1522L(hw->port),
3879 			   pf->stat_offsets_loaded,
3880 			   &osd->tx_size_1522, &nsd->tx_size_1522);
3881 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3882 			   I40E_GLPRT_PTC9522L(hw->port),
3883 			   pf->stat_offsets_loaded,
3884 			   &osd->tx_size_big, &nsd->tx_size_big);
3885 
3886 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3887 			   pf->stat_offsets_loaded,
3888 			   &osd->rx_undersize, &nsd->rx_undersize);
3889 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3890 			   pf->stat_offsets_loaded,
3891 			   &osd->rx_fragments, &nsd->rx_fragments);
3892 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3893 			   pf->stat_offsets_loaded,
3894 			   &osd->rx_oversize, &nsd->rx_oversize);
3895 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3896 			   pf->stat_offsets_loaded,
3897 			   &osd->rx_jabber, &nsd->rx_jabber);
3898 	pf->stat_offsets_loaded = true;
3899 	/* End hw stats */
3900 
3901 	/* Update vsi stats */
3902 	ixl_update_vsi_stats(vsi);
3903 
3904 	for (int i = 0; i < pf->num_vfs; i++) {
3905 		vf = &pf->vfs[i];
3906 		if (vf->vf_flags & VF_FLAG_ENABLED)
3907 			ixl_update_eth_stats(&vf->vsi);
3908 	}
3909 }
3910 
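/*
 * Rebuild hardware/driver state after an EMP reset: stop the interface
 * if it was running, shut down the LAN HMC and admin queue, then bring
 * both back up and restart the interface.
 */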
3911 int
3912 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
3913 {
3914 	struct i40e_hw *hw = &pf->hw;
3915 	struct ixl_vsi *vsi = &pf->vsi;
3916 	device_t dev = pf->dev;
3917 	bool is_up = false;
3918 	int error = 0;
3919 
3920 	is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3921 
3922 	/* Teardown */
3923 	if (is_up)
3924 		ixl_stop(pf);
3925 	error = i40e_shutdown_lan_hmc(hw);
3926 	if (error)
3927 		device_printf(dev,
3928 		    "Shutdown LAN HMC failed with code %d\n", error);
3929 	ixl_disable_intr0(hw);
3930 	ixl_teardown_adminq_msix(pf);
3931 	error = i40e_shutdown_adminq(hw);
3932 	if (error)
3933 		device_printf(dev,
3934 		    "Shutdown Admin queue failed with code %d\n", error);
3935 
3936 	/* Setup */
3937 	error = i40e_init_adminq(hw);
3938 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3939 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3940 		    error);
3941 	}
3942 	error = ixl_setup_adminq_msix(pf);
3943 	if (error) {
3944 		device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
3945 		    error);
3946 	}
3947 	ixl_configure_intr0_msix(pf);
3948 	ixl_enable_intr0(hw);
3949 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3950 	    hw->func_caps.num_rx_qp, 0, 0);
3951 	if (error) {
3952 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
3953 	}
3954 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3955 	if (error) {
3956 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3957 	}
3958 	if (is_up)
3959 		ixl_init(pf);
3960 
3961 	return (0);
3962 }
3963 
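/*
 * Wait for an in-progress EMP reset to complete by polling
 * GLGEN_RSTAT's device state every 100ms (capped at 10 seconds),
 * then rebuild the driver state.
 */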
3964 void
3965 ixl_handle_empr_reset(struct ixl_pf *pf)
3966 {
3967 	struct i40e_hw *hw = &pf->hw;
3968 	device_t dev = pf->dev;
3969 	int count = 0;
3970 	u32 reg;
3971 
3972 	/* Typically finishes within 3-4 seconds */
3973 	while (count++ < 100) {
3974 		reg = rd32(hw, I40E_GLGEN_RSTAT)
3975 		    & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
3976 		if (reg)
3977 			i40e_msec_delay(100);
3978 		else
3979 			break;
3980 	}
3981 	ixl_dbg(pf, IXL_DBG_INFO,
3982 	    "EMPR reset wait count: %d\n", count);
3983 
3984 	device_printf(dev, "Rebuilding driver state...\n");
3985 	ixl_rebuild_hw_structs_after_reset(pf);
3986 	device_printf(dev, "Rebuilding driver state done.\n");
3987 
3988 	atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
3989 }
3990 
3991 /*
3992 ** Tasklet handler for MSIX Adminq interrupts
3993 **  - do outside interrupt since it might sleep
3994 */
3995 void
3996 ixl_do_adminq(void *context, int pending)
3997 {
3998 	struct ixl_pf			*pf = context;
3999 	struct i40e_hw			*hw = &pf->hw;
4000 	struct i40e_arq_event_info	event;
4001 	i40e_status			ret;
4002 	device_t			dev = pf->dev;
4003 	u32				loop = 0;
4004 	u16				opcode, result;
4005 
4006 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4007 		/* Flag cleared at end of this function */
4008 		ixl_handle_empr_reset(pf);
4009 		return;
4010 	}
4011 
4012 	/* Admin Queue handling */
4013 	event.buf_len = IXL_AQ_BUF_SZ;
4014 	event.msg_buf = malloc(event.buf_len,
4015 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4016 	if (!event.msg_buf) {
4017 		device_printf(dev, "%s: Unable to allocate memory for Admin"
4018 		    " Queue event!\n", __func__);
4019 		return;
4020 	}
4021 
4022 	IXL_PF_LOCK(pf);
4023 	/* clean and process any events */
4024 	do {
4025 		ret = i40e_clean_arq_element(hw, &event, &result);
4026 		if (ret)
4027 			break;
4028 		opcode = LE16_TO_CPU(event.desc.opcode);
4029 		ixl_dbg(pf, IXL_DBG_AQ,
4030 		    "Admin Queue event: %#06x\n", opcode);
4031 		switch (opcode) {
4032 		case i40e_aqc_opc_get_link_status:
4033 			ixl_link_event(pf, &event);
4034 			break;
4035 		case i40e_aqc_opc_send_msg_to_pf:
4036 #ifdef PCI_IOV
4037 			ixl_handle_vf_msg(pf, &event);
4038 #endif
4039 			break;
4040 		case i40e_aqc_opc_event_lan_overflow:
4041 		default:
4042 			break;
4043 		}
4044 
4045 	} while (result && (loop++ < IXL_ADM_LIMIT));
4046 
4047 	free(event.msg_buf, M_DEVBUF);
4048 
4049 	/*
4050 	 * If there are still messages to process, reschedule ourselves.
4051 	 * Otherwise, re-enable our interrupt.
4052 	 */
4053 	if (result > 0)
4054 		taskqueue_enqueue(pf->tq, &pf->adminq);
4055 	else
4056 		ixl_enable_intr0(hw);
4057 
4058 	IXL_PF_UNLOCK(pf);
4059 }
4060 
4061 /**
4062  * Update VSI-specific ethernet statistics counters.
4063  **/
4064 void
4065 ixl_update_eth_stats(struct ixl_vsi *vsi)
4066 {
4067 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4068 	struct i40e_hw *hw = &pf->hw;
4069 	struct i40e_eth_stats *es;
4070 	struct i40e_eth_stats *oes;
4071 	struct i40e_hw_port_stats *nsd;
4072 	u16 stat_idx = vsi->info.stat_counter_idx;
4073 
4074 	es = &vsi->eth_stats;
4075 	oes = &vsi->eth_stats_offsets;
4076 	nsd = &pf->stats;
4077 
4078 	/* Gather up the stats that the hw collects */
4079 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4080 			   vsi->stat_offsets_loaded,
4081 			   &oes->tx_errors, &es->tx_errors);
4082 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4083 			   vsi->stat_offsets_loaded,
4084 			   &oes->rx_discards, &es->rx_discards);
4085 
4086 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4087 			   I40E_GLV_GORCL(stat_idx),
4088 			   vsi->stat_offsets_loaded,
4089 			   &oes->rx_bytes, &es->rx_bytes);
4090 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4091 			   I40E_GLV_UPRCL(stat_idx),
4092 			   vsi->stat_offsets_loaded,
4093 			   &oes->rx_unicast, &es->rx_unicast);
4094 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4095 			   I40E_GLV_MPRCL(stat_idx),
4096 			   vsi->stat_offsets_loaded,
4097 			   &oes->rx_multicast, &es->rx_multicast);
4098 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4099 			   I40E_GLV_BPRCL(stat_idx),
4100 			   vsi->stat_offsets_loaded,
4101 			   &oes->rx_broadcast, &es->rx_broadcast);
4102 
4103 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4104 			   I40E_GLV_GOTCL(stat_idx),
4105 			   vsi->stat_offsets_loaded,
4106 			   &oes->tx_bytes, &es->tx_bytes);
4107 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4108 			   I40E_GLV_UPTCL(stat_idx),
4109 			   vsi->stat_offsets_loaded,
4110 			   &oes->tx_unicast, &es->tx_unicast);
4111 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4112 			   I40E_GLV_MPTCL(stat_idx),
4113 			   vsi->stat_offsets_loaded,
4114 			   &oes->tx_multicast, &es->tx_multicast);
4115 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4116 			   I40E_GLV_BPTCL(stat_idx),
4117 			   vsi->stat_offsets_loaded,
4118 			   &oes->tx_broadcast, &es->tx_broadcast);
4119 	vsi->stat_offsets_loaded = true;
4120 }
4121 
4122 void
4123 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4124 {
4125 	struct ixl_pf		*pf;
4126 	struct ifnet		*ifp;
4127 	struct i40e_eth_stats	*es;
4128 	u64			tx_discards;
4129 
4130 	struct i40e_hw_port_stats *nsd;
4131 
4132 	pf = vsi->back;
4133 	ifp = vsi->ifp;
4134 	es = &vsi->eth_stats;
4135 	nsd = &pf->stats;
4136 
4137 	ixl_update_eth_stats(vsi);
4138 
4139 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4140 	for (int i = 0; i < vsi->num_queues; i++)
4141 		tx_discards += vsi->queues[i].txr.br->br_drops;
4142 
4143 	/* Update ifnet stats */
4144 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4145 	                   es->rx_multicast +
4146 			   es->rx_broadcast);
4147 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4148 	                   es->tx_multicast +
4149 			   es->tx_broadcast);
4150 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4151 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4152 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4153 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4154 
4155 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4156 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4157 	    nsd->rx_jabber);
4158 	IXL_SET_OERRORS(vsi, es->tx_errors);
4159 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4160 	IXL_SET_OQDROPS(vsi, tx_discards);
4161 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4162 	IXL_SET_COLLISIONS(vsi, 0);
4163 }
4164 
4165 /**
4166  * Reset all of the stats for the given pf
4167  **/
4168 void
4169 ixl_pf_reset_stats(struct ixl_pf *pf)
4170 {
4171 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4172 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4173 	pf->stat_offsets_loaded = false;
4174 }
4175 
4176 /**
4177  * Resets all stats of the given vsi
4178  **/
4179 void
4180 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4181 {
4182 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4183 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4184 	vsi->stat_offsets_loaded = false;
4185 }
4186 
4187 /**
4188  * Read and update a 48 bit stat from the hw
4189  *
4190  * Since the device stats are not reset at PFReset, they likely will not
4191  * be zeroed when the driver starts.  We'll save the first values read
4192  * and use them as offsets to be subtracted from the raw values in order
4193  * to report stats that count from zero.
4194  **/
4195 void
4196 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4197 	bool offset_loaded, u64 *offset, u64 *stat)
4198 {
4199 	u64 new_data;
4200 
4201 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4202 	new_data = rd64(hw, loreg);
4203 #else
4204 	/*
4205 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4206 	 * 10 don't support 64-bit bus reads/writes, and this path also
	 * covers non-amd64 platforms excluded by the #if above.
4207 	 */
4208 	new_data = rd32(hw, loreg);
4209 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4210 #endif
4211 
4212 	if (!offset_loaded)
4213 		*offset = new_data;
4214 	if (new_data >= *offset)
4215 		*stat = new_data - *offset;
4216 	else
4217 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4218 	*stat &= 0xFFFFFFFFFFFFULL;
4219 }
4220 
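/*
 * Worked example of the rollover handling in ixl_stat_update48() above:
 * with *offset = 0xFFFFFFFFFFF0 and a raw read of 0x10, the reported
 * stat is (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20 counts since load.
 */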
4221 /**
4222  * Read and update a 32 bit stat from the hw
4223  **/
4224 void
4225 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4226 	bool offset_loaded, u64 *offset, u64 *stat)
4227 {
4228 	u32 new_data;
4229 
4230 	new_data = rd32(hw, reg);
4231 	if (!offset_loaded)
4232 		*offset = new_data;
4233 	if (new_data >= *offset)
4234 		*stat = (u32)(new_data - *offset);
4235 	else
4236 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4237 }
4238 
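/*
 * Attach the per-device sysctl tree. These show up under dev.ixl.<unit>
 * in userland; for example (assuming unit 0):
 *   sysctl dev.ixl.0.fc=3            # request full flow control
 *   sysctl dev.ixl.0.current_speed   # read the negotiated link speed
 */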
4239 void
4240 ixl_add_device_sysctls(struct ixl_pf *pf)
4241 {
4242 	device_t dev = pf->dev;
4243 	struct i40e_hw *hw = &pf->hw;
4244 
4245 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4246 	struct sysctl_oid_list *ctx_list =
4247 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4248 
4249 	struct sysctl_oid *debug_node;
4250 	struct sysctl_oid_list *debug_list;
4251 
4252 	struct sysctl_oid *fec_node;
4253 	struct sysctl_oid_list *fec_list;
4254 
4255 	/* Set up sysctls */
4256 	SYSCTL_ADD_PROC(ctx, ctx_list,
4257 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4258 	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4259 
4260 	SYSCTL_ADD_PROC(ctx, ctx_list,
4261 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4262 	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4263 
4264 	SYSCTL_ADD_PROC(ctx, ctx_list,
4265 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4266 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
4267 
4268 	SYSCTL_ADD_PROC(ctx, ctx_list,
4269 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4270 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4271 
4272 	SYSCTL_ADD_PROC(ctx, ctx_list,
4273 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4274 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
4275 	    "Queues not allocated to a PF or VF");
4276 
4277 	SYSCTL_ADD_PROC(ctx, ctx_list,
4278 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4279 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
4280 	    "Immediately set TX ITR value for all queues");
4281 
4282 	SYSCTL_ADD_PROC(ctx, ctx_list,
4283 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4284 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
4285 	    "Immediately set RX ITR value for all queues");
4286 
4287 	SYSCTL_ADD_INT(ctx, ctx_list,
4288 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4289 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4290 
4291 	SYSCTL_ADD_INT(ctx, ctx_list,
4292 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4293 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4294 
4295 	/* Add FEC sysctls for 25G adapters */
4296 	/*
4297 	 * XXX: These settings can be changed, but that isn't supported,
4298 	 * so these are read-only for now.
4299 	 */
4300 	if (hw->device_id == I40E_DEV_ID_25G_B
4301 	    || hw->device_id == I40E_DEV_ID_25G_SFP28) {
4302 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4303 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4304 		fec_list = SYSCTL_CHILDREN(fec_node);
4305 
4306 		SYSCTL_ADD_PROC(ctx, fec_list,
4307 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
4308 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4309 
4310 		SYSCTL_ADD_PROC(ctx, fec_list,
4311 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
4312 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4313 
4314 		SYSCTL_ADD_PROC(ctx, fec_list,
4315 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
4316 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4317 
4318 		SYSCTL_ADD_PROC(ctx, fec_list,
4319 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
4320 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4321 
4322 		SYSCTL_ADD_PROC(ctx, fec_list,
4323 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
4324 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4325 	}
4326 
4327 	/* Add sysctls meant to print debug information, but don't list them
4328 	 * in "sysctl -a" output. */
4329 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4330 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4331 	debug_list = SYSCTL_CHILDREN(debug_node);
4332 
4333 	SYSCTL_ADD_UINT(ctx, debug_list,
4334 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4335 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
4336 
4337 	SYSCTL_ADD_UINT(ctx, debug_list,
4338 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4339 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
4340 
4341 	SYSCTL_ADD_PROC(ctx, debug_list,
4342 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4343 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4344 
4345 	SYSCTL_ADD_PROC(ctx, debug_list,
4346 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4347 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4348 
4349 	SYSCTL_ADD_PROC(ctx, debug_list,
4350 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4351 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4352 
4353 	SYSCTL_ADD_PROC(ctx, debug_list,
4354 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4355 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4356 
4357 	SYSCTL_ADD_PROC(ctx, debug_list,
4358 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4359 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4360 
4361 	SYSCTL_ADD_PROC(ctx, debug_list,
4362 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4363 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4364 
4365 	SYSCTL_ADD_PROC(ctx, debug_list,
4366 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4367 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4368 
4369 	SYSCTL_ADD_PROC(ctx, debug_list,
4370 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4371 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4372 
4373 	SYSCTL_ADD_PROC(ctx, debug_list,
4374 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4375 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4376 
4377 	if (pf->has_i2c) {
4378 		SYSCTL_ADD_PROC(ctx, debug_list,
4379 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4380 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4381 
4382 		SYSCTL_ADD_PROC(ctx, debug_list,
4383 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4384 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4385 	}
4386 
4387 #ifdef PCI_IOV
4388 	SYSCTL_ADD_UINT(ctx, debug_list,
4389 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4390 	    0, "PF/VF Virtual Channel debug level");
4391 #endif
4392 }
4393 
4394 /*
4395  * Primarily used to find out, at runtime, how many queues can be
4396  * assigned to VFs.
4397  */
4398 static int
4399 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4400 {
4401 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4402 	int queues;
4403 
4404 	IXL_PF_LOCK(pf);
4405 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4406 	IXL_PF_UNLOCK(pf);
4407 
4408 	return sysctl_handle_int(oidp, NULL, queues, req);
4409 }
4410 
4411 /*
4412 ** Set flow control using sysctl:
4413 ** 	0 - off
4414 **	1 - rx pause
4415 **	2 - tx pause
4416 **	3 - full
4417 */
4418 int
4419 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4420 {
4421 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4422 	struct i40e_hw *hw = &pf->hw;
4423 	device_t dev = pf->dev;
4424 	int requested_fc, error = 0;
4425 	enum i40e_status_code aq_error = 0;
4426 	u8 fc_aq_err = 0;
4427 
4428 	/* Get request */
4429 	requested_fc = pf->fc;
4430 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4431 	if ((error) || (req->newptr == NULL))
4432 		return (error);
4433 	if (requested_fc < 0 || requested_fc > 3) {
4434 		device_printf(dev,
4435 		    "Invalid fc mode; valid modes are 0 through 3\n");
4436 		return (EINVAL);
4437 	}
4438 
4439 	/* Set fc ability for port */
4440 	hw->fc.requested_mode = requested_fc;
4441 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4442 	if (aq_error) {
4443 		device_printf(dev,
4444 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4445 		    __func__, aq_error, fc_aq_err);
4446 		return (EIO);
4447 	}
4448 	pf->fc = requested_fc;
4449 
4450 	/* Get new link state */
4451 	i40e_msec_delay(250);
4452 	hw->phy.get_link_info = TRUE;
4453 	i40e_get_link_status(hw, &pf->link_up);
4454 
4455 	return (0);
4456 }
4457 
4458 char *
4459 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4460 {
4461 	int index;
4462 
4463 	char *speeds[] = {
4464 		"Unknown",
4465 		"100 Mbps",
4466 		"1 Gbps",
4467 		"10 Gbps",
4468 		"40 Gbps",
4469 		"20 Gbps",
4470 		"25 Gbps",
4471 	};
4472 
4473 	switch (link_speed) {
4474 	case I40E_LINK_SPEED_100MB:
4475 		index = 1;
4476 		break;
4477 	case I40E_LINK_SPEED_1GB:
4478 		index = 2;
4479 		break;
4480 	case I40E_LINK_SPEED_10GB:
4481 		index = 3;
4482 		break;
4483 	case I40E_LINK_SPEED_40GB:
4484 		index = 4;
4485 		break;
4486 	case I40E_LINK_SPEED_20GB:
4487 		index = 5;
4488 		break;
4489 	case I40E_LINK_SPEED_25GB:
4490 		index = 6;
4491 		break;
4492 	case I40E_LINK_SPEED_UNKNOWN:
4493 	default:
4494 		index = 0;
4495 		break;
4496 	}
4497 
4498 	return speeds[index];
4499 }
4500 
4501 int
4502 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4503 {
4504 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 	struct i40e_hw *hw = &pf->hw;
4506 	int error = 0;
4507 
4508 	ixl_update_link_status(pf);
4509 
4510 	error = sysctl_handle_string(oidp,
4511 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
4512 	    8, req);
4513 	return (error);
4514 }
4515 
4516 static u8
4517 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4518 {
4519 	static u16 speedmap[6] = {
4520 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
4521 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
4522 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
4523 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
4524 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
4525 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
4526 	};
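	/*
	 * Each entry packs the AQ link-speed flag into the low byte and
	 * the corresponding sysctl flag into the high byte, so one table
	 * serves conversions in both directions.
	 */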
4527 	u8 retval = 0;
4528 
4529 	for (int i = 0; i < 6; i++) {
4530 		if (to_aq)
4531 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4532 		else
4533 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
4534 	}
4535 
4536 	return (retval);
4537 }
4538 
4539 int
4540 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4541 {
4542 	struct i40e_hw *hw = &pf->hw;
4543 	device_t dev = pf->dev;
4544 	struct i40e_aq_get_phy_abilities_resp abilities;
4545 	struct i40e_aq_set_phy_config config;
4546 	enum i40e_status_code aq_error = 0;
4547 
4548 	/* Get current capability information */
4549 	aq_error = i40e_aq_get_phy_capabilities(hw,
4550 	    FALSE, FALSE, &abilities, NULL);
4551 	if (aq_error) {
4552 		device_printf(dev,
4553 		    "%s: Error getting phy capabilities %d,"
4554 		    " aq error: %d\n", __func__, aq_error,
4555 		    hw->aq.asq_last_status);
4556 		return (EIO);
4557 	}
4558 
4559 	/* Prepare new config */
4560 	bzero(&config, sizeof(config));
4561 	config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4562 	config.phy_type = abilities.phy_type;
4563 	config.phy_type_ext = abilities.phy_type_ext;
4564 	config.abilities = abilities.abilities
4565 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4566 	config.eee_capability = abilities.eee_capability;
4567 	config.eeer = abilities.eeer_val;
4568 	config.low_power_ctrl = abilities.d3_lpan;
4569 
4570 	/* Do aq command & restart link */
4571 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4572 	if (aq_error) {
4573 		device_printf(dev,
4574 		    "%s: Error setting new phy config %d,"
4575 		    " aq error: %d\n", __func__, aq_error,
4576 		    hw->aq.asq_last_status);
4577 		return (EIO);
4578 	}
4579 
4580 	return (0);
4581 }
4582 
4583 /*
4584 ** Control link advertise speed:
4585 **	Flags:
4586 **	 0x1 - advertise 100 Mb
4587 **	 0x2 - advertise 1G
4588 **	 0x4 - advertise 10G
4589 **	 0x8 - advertise 20G
4590 **	0x10 - advertise 25G
4591 **	0x20 - advertise 40G
4592 **
4593 **	Set to 0 to disable link
4594 */
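/* e.g. `sysctl dev.ixl.0.advertise_speed=0x6` advertises 1G + 10G (unit 0 assumed) */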
4595 int
4596 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4597 {
4598 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4599 	struct i40e_hw *hw = &pf->hw;
4600 	device_t dev = pf->dev;
4601 	u8 converted_speeds;
4602 	int requested_ls = 0;
4603 	int error = 0;
4604 
4605 	/* Read in new mode */
4606 	requested_ls = pf->advertised_speed;
4607 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4608 	if ((error) || (req->newptr == NULL))
4609 		return (error);
4610 	/* Check if changing speeds is supported */
4611 	switch (hw->device_id) {
4612 	case I40E_DEV_ID_25G_B:
4613 	case I40E_DEV_ID_25G_SFP28:
4614 		device_printf(dev, "Changing advertised speeds not supported"
4615 		" on this device.\n");
4616 		return (EINVAL);
4617 	}
4618 	if (requested_ls < 0 || requested_ls > 0xff) {
		device_printf(dev,
		    "Invalid advertised speed; valid flags are 0x0 to 0xFF\n");
		return (EINVAL);
4619 	}
4620 
4621 	/* Check for valid value */
4622 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4623 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4624 		device_printf(dev, "Invalid advertised speed; "
4625 		    "valid flags are: 0x%02x\n",
4626 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4627 		return (EINVAL);
4628 	}
4629 
4630 	error = ixl_set_advertised_speeds(pf, requested_ls);
4631 	if (error)
4632 		return (error);
4633 
4634 	pf->advertised_speed = requested_ls;
4635 	ixl_update_link_status(pf);
4636 	return (0);
4637 }
4638 
4639 /*
4640  * Input: bitmap of enum i40e_aq_link_speed
4641  */
4642 static u64
4643 ixl_max_aq_speed_to_value(u8 link_speeds)
4644 {
4645 	if (link_speeds & I40E_LINK_SPEED_40GB)
4646 		return IF_Gbps(40);
4647 	if (link_speeds & I40E_LINK_SPEED_25GB)
4648 		return IF_Gbps(25);
4649 	if (link_speeds & I40E_LINK_SPEED_20GB)
4650 		return IF_Gbps(20);
4651 	if (link_speeds & I40E_LINK_SPEED_10GB)
4652 		return IF_Gbps(10);
4653 	if (link_speeds & I40E_LINK_SPEED_1GB)
4654 		return IF_Gbps(1);
4655 	/* 100 Mbps is also the minimum supported link speed */
4656 	return IF_Mbps(100);
4660 }
4661 
4662 /*
4663 ** Get the width and transaction speed of
4664 ** the bus this adapter is plugged into.
4665 */
4666 void
4667 ixl_get_bus_info(struct ixl_pf *pf)
4668 {
4669 	struct i40e_hw *hw = &pf->hw;
4670 	device_t dev = pf->dev;
4671 	u16 link;
4672 	u32 offset, num_ports;
4673 	u64 max_speed;
4674 
4675 	/* Some devices don't use PCIE */
4676 	if (hw->mac.type == I40E_MAC_X722)
4677 		return;
4678 
4679 	/* Read PCI Express Capabilities Link Status Register */
4680 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
4681 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4682 
4683 	/* Fill out hw struct with PCIE info */
4684 	i40e_set_pci_config_data(hw, link);
4685 
4686 	/* Use info to print out bandwidth messages */
4687 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4688 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4689 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4690 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4691 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4692 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4693 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4694 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4695 	    ("Unknown"));
4696 
4697 	/*
4698 	 * If adapter is in slot with maximum supported speed,
4699 	 * no warning message needs to be printed out.
4700 	 */
4701 	if (hw->bus.speed >= i40e_bus_speed_8000
4702 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
4703 		return;
4704 
4705 	num_ports = bitcount32(hw->func_caps.valid_functions);
4706 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4707 
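	/*
	 * Rough heuristic: total port bandwidth in Mb/s versus transfer
	 * rate (MT/s) times lane count. Encoding overhead is ignored, so
	 * this only flags clearly undersized slots.
	 */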
4708 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4709 		device_printf(dev, "PCI-Express bandwidth available"
4710 		    " for this device may be insufficient for"
4711 		    " optimal performance.\n");
4712 		device_printf(dev, "Please move the device to a different"
4713 		    " PCI-e link with more lanes and/or higher"
4714 		    " transfer rate.\n");
4715 	}
4716 }
4717 
4718 static int
4719 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4720 {
4721 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4722 	struct i40e_hw	*hw = &pf->hw;
4723 	struct sbuf	*sbuf;
4724 
4725 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4726 	ixl_nvm_version_str(hw, sbuf);
4727 	sbuf_finish(sbuf);
4728 	sbuf_delete(sbuf);
4729 
4730 	return 0;
4731 }
4732 
4733 void
4734 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4735 {
4736 	if ((nvma->command == I40E_NVM_READ) &&
4737 	    ((nvma->config & 0xFF) == 0xF) &&
4738 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
4739 	    (nvma->offset == 0) &&
4740 	    (nvma->data_size == 1)) {
4741 		// device_printf(dev, "- Get Driver Status Command\n");
4742 	}
4743 	else if (nvma->command == I40E_NVM_READ) {
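		/* Plain NVM reads are frequent during updates; don't log them */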
4744 
4745 	}
4746 	else {
4747 		switch (nvma->command) {
4748 		case 0xB:
4749 			device_printf(dev, "- command: I40E_NVM_READ\n");
4750 			break;
4751 		case 0xC:
4752 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
4753 			break;
4754 		default:
4755 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4756 			break;
4757 		}
4758 
4759 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4760 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4761 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4762 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4763 	}
4764 }
4765 
4766 int
4767 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4768 {
4769 	struct i40e_hw *hw = &pf->hw;
4770 	struct i40e_nvm_access *nvma;
4771 	device_t dev = pf->dev;
4772 	enum i40e_status_code status = 0;
4773 	int perrno;
4774 
4775 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
4776 
4777 	/* Sanity checks */
4778 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4779 	    ifd->ifd_data == NULL) {
4780 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4781 		    __func__);
4782 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
4783 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4784 		device_printf(dev, "%s: data pointer: %p\n", __func__,
4785 		    ifd->ifd_data);
4786 		return (EINVAL);
4787 	}
4788 
4789 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4790 
4791 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
4792 		ixl_print_nvm_cmd(dev, nvma);
4793 
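	/* If an EMP reset is in progress, wait up to 10 seconds for it to finish */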
4794 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4795 		int count = 0;
4796 		while (count++ < 100) {
4797 			i40e_msec_delay(100);
4798 			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4799 				break;
4800 		}
4801 	}
4802 
4803 	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4804 		IXL_PF_LOCK(pf);
4805 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4806 		IXL_PF_UNLOCK(pf);
4807 	} else {
4808 		perrno = -EBUSY;
4809 	}
4810 
4811 	if (status)
4812 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
4813 		    i40e_stat_str(hw, status), perrno);
4814 
4815 	/*
4816 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
4817 	 * to restart this ioctl, so return -EACCES in place of -EPERM.
4818 	 */
4819 	if (perrno == -EPERM)
4820 		return (-EACCES);
4821 	else
4822 		return (perrno);
4823 }
4824 
4825 /*********************************************************************
4826  *
4827  *  Media Ioctl callback
4828  *
4829  *  This routine is called whenever the user queries the status of
4830  *  the interface using ifconfig.
4831  *
4832  **********************************************************************/
4833 void
4834 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4835 {
4836 	struct ixl_vsi	*vsi = ifp->if_softc;
4837 	struct ixl_pf	*pf = vsi->back;
4838 	struct i40e_hw  *hw = &pf->hw;
4839 
4840 	INIT_DEBUGOUT("ixl_media_status: begin");
4841 	IXL_PF_LOCK(pf);
4842 
4843 	hw->phy.get_link_info = TRUE;
4844 	i40e_get_link_status(hw, &pf->link_up);
4845 	ixl_update_link_status(pf);
4846 
4847 	ifmr->ifm_status = IFM_AVALID;
4848 	ifmr->ifm_active = IFM_ETHER;
4849 
4850 	if (!pf->link_up) {
4851 		IXL_PF_UNLOCK(pf);
4852 		return;
4853 	}
4854 
4855 	ifmr->ifm_status |= IFM_ACTIVE;
4856 
4857 	/* Hardware always does full-duplex */
4858 	ifmr->ifm_active |= IFM_FDX;
4859 
4860 	switch (hw->phy.link_info.phy_type) {
4861 		/* 100 M */
4862 		case I40E_PHY_TYPE_100BASE_TX:
4863 			ifmr->ifm_active |= IFM_100_TX;
4864 			break;
4865 		/* 1 G */
4866 		case I40E_PHY_TYPE_1000BASE_T:
4867 			ifmr->ifm_active |= IFM_1000_T;
4868 			break;
4869 		case I40E_PHY_TYPE_1000BASE_SX:
4870 			ifmr->ifm_active |= IFM_1000_SX;
4871 			break;
4872 		case I40E_PHY_TYPE_1000BASE_LX:
4873 			ifmr->ifm_active |= IFM_1000_LX;
4874 			break;
4875 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4876 			ifmr->ifm_active |= IFM_OTHER;
4877 			break;
4878 		/* 10 G */
4879 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4880 			ifmr->ifm_active |= IFM_10G_TWINAX;
4881 			break;
4882 		case I40E_PHY_TYPE_10GBASE_SR:
4883 			ifmr->ifm_active |= IFM_10G_SR;
4884 			break;
4885 		case I40E_PHY_TYPE_10GBASE_LR:
4886 			ifmr->ifm_active |= IFM_10G_LR;
4887 			break;
4888 		case I40E_PHY_TYPE_10GBASE_T:
4889 			ifmr->ifm_active |= IFM_10G_T;
4890 			break;
4891 		case I40E_PHY_TYPE_XAUI:
4892 		case I40E_PHY_TYPE_XFI:
4893 		case I40E_PHY_TYPE_10GBASE_AOC:
4894 			ifmr->ifm_active |= IFM_OTHER;
4895 			break;
4896 		/* 25 G */
4897 		case I40E_PHY_TYPE_25GBASE_KR:
4898 			ifmr->ifm_active |= IFM_25G_KR;
4899 			break;
4900 		case I40E_PHY_TYPE_25GBASE_CR:
4901 			ifmr->ifm_active |= IFM_25G_CR;
4902 			break;
4903 		case I40E_PHY_TYPE_25GBASE_SR:
4904 			ifmr->ifm_active |= IFM_25G_SR;
4905 			break;
4906 		case I40E_PHY_TYPE_25GBASE_LR:
4907 			ifmr->ifm_active |= IFM_UNKNOWN;
4908 			break;
4909 		/* 40 G */
4910 		case I40E_PHY_TYPE_40GBASE_CR4:
4911 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
4912 			ifmr->ifm_active |= IFM_40G_CR4;
4913 			break;
4914 		case I40E_PHY_TYPE_40GBASE_SR4:
4915 			ifmr->ifm_active |= IFM_40G_SR4;
4916 			break;
4917 		case I40E_PHY_TYPE_40GBASE_LR4:
4918 			ifmr->ifm_active |= IFM_40G_LR4;
4919 			break;
4920 		case I40E_PHY_TYPE_XLAUI:
4921 			ifmr->ifm_active |= IFM_OTHER;
4922 			break;
4923 		case I40E_PHY_TYPE_1000BASE_KX:
4924 			ifmr->ifm_active |= IFM_1000_KX;
4925 			break;
4926 		case I40E_PHY_TYPE_SGMII:
4927 			ifmr->ifm_active |= IFM_1000_SGMII;
4928 			break;
4929 		/* ERJ: What's the difference between these? */
4930 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
4931 		case I40E_PHY_TYPE_10GBASE_CR1:
4932 			ifmr->ifm_active |= IFM_10G_CR1;
4933 			break;
4934 		case I40E_PHY_TYPE_10GBASE_KX4:
4935 			ifmr->ifm_active |= IFM_10G_KX4;
4936 			break;
4937 		case I40E_PHY_TYPE_10GBASE_KR:
4938 			ifmr->ifm_active |= IFM_10G_KR;
4939 			break;
4940 		case I40E_PHY_TYPE_SFI:
4941 			ifmr->ifm_active |= IFM_10G_SFI;
4942 			break;
4943 		/* Our single 20G media type */
4944 		case I40E_PHY_TYPE_20GBASE_KR2:
4945 			ifmr->ifm_active |= IFM_20G_KR2;
4946 			break;
4947 		case I40E_PHY_TYPE_40GBASE_KR4:
4948 			ifmr->ifm_active |= IFM_40G_KR4;
4949 			break;
4950 		case I40E_PHY_TYPE_XLPPI:
4951 		case I40E_PHY_TYPE_40GBASE_AOC:
4952 			ifmr->ifm_active |= IFM_40G_XLPPI;
4953 			break;
4954 		/* Unknown to driver */
4955 		default:
4956 			ifmr->ifm_active |= IFM_UNKNOWN;
4957 			break;
4958 	}
4959 	/* Report flow control status as well */
4960 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4961 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4962 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4963 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
4964 
4965 	IXL_PF_UNLOCK(pf);
4966 }
4967 
4968 void
4969 ixl_init(void *arg)
4970 {
4971 	struct ixl_pf *pf = arg;
4972 
4973 	IXL_PF_LOCK(pf);
4974 	ixl_init_locked(pf);
4975 	IXL_PF_UNLOCK(pf);
4976 }
4977 
4978 /*
4979  * NOTE: Fortville does not support forcing media speeds. Instead,
4980  * use the set_advertise sysctl to set the speeds Fortville
4981  * will advertise or be allowed to operate at.
4982  */
4983 int
4984 ixl_media_change(struct ifnet * ifp)
4985 {
4986 	struct ixl_vsi *vsi = ifp->if_softc;
4987 	struct ifmedia *ifm = &vsi->media;
4988 
4989 	INIT_DEBUGOUT("ixl_media_change: begin");
4990 
4991 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4992 		return (EINVAL);
4993 
4994 	if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
4995 
4996 	return (ENODEV);
4997 }
4998 
4999 /*********************************************************************
5000  *  Ioctl entry point
5001  *
5002  *  ixl_ioctl is called when the user wants to configure the
5003  *  interface.
5004  *
5005  *  return 0 on success, positive on failure
5006  **********************************************************************/
5007 
5008 int
5009 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5010 {
5011 	struct ixl_vsi	*vsi = ifp->if_softc;
5012 	struct ixl_pf	*pf = vsi->back;
5013 	struct ifreq	*ifr = (struct ifreq *)data;
5014 	struct ifdrv	*ifd = (struct ifdrv *)data;
5015 #if defined(INET) || defined(INET6)
5016 	struct ifaddr *ifa = (struct ifaddr *)data;
5017 	bool		avoid_reset = FALSE;
5018 #endif
5019 	int             error = 0;
5020 
5021 	switch (command) {
5022 
5023         case SIOCSIFADDR:
5024 		IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5025 #ifdef INET
5026 		if (ifa->ifa_addr->sa_family == AF_INET)
5027 			avoid_reset = TRUE;
5028 #endif
5029 #ifdef INET6
5030 		if (ifa->ifa_addr->sa_family == AF_INET6)
5031 			avoid_reset = TRUE;
5032 #endif
5033 #if defined(INET) || defined(INET6)
5034 		/*
5035 		** Calling init results in link renegotiation,
5036 		** so we avoid doing it when possible.
5037 		*/
5038 		if (avoid_reset) {
5039 			ifp->if_flags |= IFF_UP;
5040 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5041 				ixl_init(pf);
5042 #ifdef INET
5043 			if (!(ifp->if_flags & IFF_NOARP))
5044 				arp_ifinit(ifp, ifa);
5045 #endif
5046 		} else
5047 			error = ether_ioctl(ifp, command, data);
5048 		break;
5049 #endif
5050 	case SIOCSIFMTU:
5051 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5052 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
5053 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5054 			error = EINVAL;
5055 		} else {
5056 			IXL_PF_LOCK(pf);
5057 			ifp->if_mtu = ifr->ifr_mtu;
5058 			vsi->max_frame_size =
5059 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5060 			    + ETHER_VLAN_ENCAP_LEN;
5061 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5062 				ixl_init_locked(pf);
5063 			IXL_PF_UNLOCK(pf);
5064 		}
5065 		break;
5066 	case SIOCSIFFLAGS:
5067 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5068 		IXL_PF_LOCK(pf);
5069 		if (ifp->if_flags & IFF_UP) {
5070 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5071 				if ((ifp->if_flags ^ pf->if_flags) &
5072 				    (IFF_PROMISC | IFF_ALLMULTI)) {
5073 					ixl_set_promisc(vsi);
5074 				}
5075 			} else {
5076 				IXL_PF_UNLOCK(pf);
5077 				ixl_init(pf);
5078 				IXL_PF_LOCK(pf);
5079 			}
5080 		} else {
5081 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5082 				ixl_stop_locked(pf);
5083 			}
5084 		}
5085 		pf->if_flags = ifp->if_flags;
5086 		IXL_PF_UNLOCK(pf);
5087 		break;
5088 	case SIOCSDRVSPEC:
5089 	case SIOCGDRVSPEC:
5090 		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5091 		    "Info)\n");
5092 
5093 		/* NVM update command */
5094 		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5095 			error = ixl_handle_nvmupd_cmd(pf, ifd);
5096 		else
5097 			error = EINVAL;
5098 		break;
5099 	case SIOCADDMULTI:
5100 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5101 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5102 			IXL_PF_LOCK(pf);
5103 			ixl_disable_rings_intr(vsi);
5104 			ixl_add_multi(vsi);
5105 			ixl_enable_intr(vsi);
5106 			IXL_PF_UNLOCK(pf);
5107 		}
5108 		break;
5109 	case SIOCDELMULTI:
5110 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5111 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5112 			IXL_PF_LOCK(pf);
5113 			ixl_disable_rings_intr(vsi);
5114 			ixl_del_multi(vsi);
5115 			ixl_enable_intr(vsi);
5116 			IXL_PF_UNLOCK(pf);
5117 		}
5118 		break;
5119 	case SIOCSIFMEDIA:
5120 	case SIOCGIFMEDIA:
5121 	case SIOCGIFXMEDIA:
5122 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5123 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5124 		break;
5125 	case SIOCSIFCAP:
5126 	{
5127 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5128 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5129 
5130 		ixl_cap_txcsum_tso(vsi, ifp, mask);
5131 
5132 		if (mask & IFCAP_RXCSUM)
5133 			ifp->if_capenable ^= IFCAP_RXCSUM;
5134 		if (mask & IFCAP_RXCSUM_IPV6)
5135 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5136 		if (mask & IFCAP_LRO)
5137 			ifp->if_capenable ^= IFCAP_LRO;
5138 		if (mask & IFCAP_VLAN_HWTAGGING)
5139 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5140 		if (mask & IFCAP_VLAN_HWFILTER)
5141 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5142 		if (mask & IFCAP_VLAN_HWTSO)
5143 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5144 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5145 			IXL_PF_LOCK(pf);
5146 			ixl_init_locked(pf);
5147 			IXL_PF_UNLOCK(pf);
5148 		}
5149 		VLAN_CAPABILITIES(ifp);
5150 
5151 		break;
5152 	}
5153 #if __FreeBSD_version >= 1003000
5154 	case SIOCGI2C:
5155 	{
5156 		struct ifi2creq i2c;
5157 		int i;
5158 
5159 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5160 		if (!pf->has_i2c)
5161 			return (ENOTTY);
5162 
5163 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5164 		if (error != 0)
5165 			break;
5166 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5167 			error = EINVAL;
5168 			break;
5169 		}
5170 		if (i2c.len > sizeof(i2c.data)) {
5171 			error = EINVAL;
5172 			break;
5173 		}
5174 
5175 		for (i = 0; i < i2c.len; i++)
5176 			if (ixl_read_i2c_byte(pf, i2c.offset + i,
5177 			    i2c.dev_addr, &i2c.data[i]))
5178 				return (EIO);
5179 
5180 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5181 		break;
5182 	}
5183 #endif
5184 	default:
5185 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5186 		error = ether_ioctl(ifp, command, data);
5187 		break;
5188 	}
5189 
5190 	return (error);
5191 }
5192 
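/*
 * Scan the four GLGEN_MDIO_I2C_SEL registers for one with I2C enabled
 * whose port bitmap includes this PF's port; returns the matching
 * index, or -1 if none is found.
 */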
5193 int
5194 ixl_find_i2c_interface(struct ixl_pf *pf)
5195 {
5196 	struct i40e_hw *hw = &pf->hw;
5197 	bool i2c_en, port_matched;
5198 	u32 reg;
5199 
5200 	for (int i = 0; i < 4; i++) {
5201 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5202 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5203 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5204 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5205 		    & BIT(hw->port);
5206 		if (i2c_en && port_matched)
5207 			return (i);
5208 	}
5209 
5210 	return (-1);
5211 }
5212 
5213 static char *
5214 ixl_phy_type_string(u32 bit_pos, bool ext)
5215 {
5216 	static char * phy_types_str[32] = {
5217 		"SGMII",
5218 		"1000BASE-KX",
5219 		"10GBASE-KX4",
5220 		"10GBASE-KR",
5221 		"40GBASE-KR4",
5222 		"XAUI",
5223 		"XFI",
5224 		"SFI",
5225 		"XLAUI",
5226 		"XLPPI",
5227 		"40GBASE-CR4",
5228 		"10GBASE-CR1",
5229 		"Reserved (12)",
5230 		"Reserved (13)",
5231 		"Reserved (14)",
5232 		"Reserved (15)",
5233 		"Reserved (16)",
5234 		"100BASE-TX",
5235 		"1000BASE-T",
5236 		"10GBASE-T",
5237 		"10GBASE-SR",
5238 		"10GBASE-LR",
5239 		"10GBASE-SFP+Cu",
5240 		"10GBASE-CR1",
5241 		"40GBASE-CR4",
5242 		"40GBASE-SR4",
5243 		"40GBASE-LR4",
5244 		"1000BASE-SX",
5245 		"1000BASE-LX",
5246 		"1000BASE-T Optical",
5247 		"20GBASE-KR2",
5248 		"Reserved (31)"
5249 	};
5250 	static char * ext_phy_types_str[4] = {
5251 		"25GBASE-KR",
5252 		"25GBASE-CR",
5253 		"25GBASE-SR",
5254 		"25GBASE-LR"
5255 	};
5256 
5257 	if (ext && bit_pos > 3) return "Invalid_Ext";
5258 	if (bit_pos > 31) return "Invalid";
5259 
5260 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5261 }
5262 
5263 int
5264 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5265 {
5266 	device_t dev = pf->dev;
5267 	struct i40e_hw *hw = &pf->hw;
5268 	struct i40e_aq_desc desc;
5269 	enum i40e_status_code status;
5270 
5271 	struct i40e_aqc_get_link_status *aq_link_status =
5272 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
5273 
5274 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5275 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5276 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5277 	if (status) {
5278 		device_printf(dev,
5279 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5280 		    __func__, i40e_stat_str(hw, status),
5281 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5282 		return (EIO);
5283 	}
5284 
5285 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5286 	return (0);
5287 }
5288 
5289 static char *
5290 ixl_phy_type_string_ls(u8 val)
5291 {
5292 	if (val >= 0x1F)
5293 		return ixl_phy_type_string(val - 0x1F, true);
5294 	else
5295 		return ixl_phy_type_string(val, false);
5296 }
5297 
5298 static int
5299 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5300 {
5301 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5302 	device_t dev = pf->dev;
5303 	struct sbuf *buf;
5304 	int error = 0;
5305 
5306 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5307 	if (!buf) {
5308 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5309 		return (ENOMEM);
5310 	}
5311 
5312 	struct i40e_aqc_get_link_status link_status;
5313 	error = ixl_aq_get_link_status(pf, &link_status);
5314 	if (error) {
5315 		sbuf_delete(buf);
5316 		return (error);
5317 	}
5318 
5319 	/* TODO: Add 25G types */
5320 	sbuf_printf(buf, "\n"
5321 	    "PHY Type : 0x%02x<%s>\n"
5322 	    "Speed    : 0x%02x\n"
5323 	    "Link info: 0x%02x\n"
5324 	    "AN info  : 0x%02x\n"
5325 	    "Ext info : 0x%02x\n"
5326 	    "Loopback : 0x%02x\n"
5327 	    "Max Frame: %d\n"
5328 	    "Config   : 0x%02x\n"
5329 	    "Power    : 0x%02x",
5330 	    link_status.phy_type,
5331 	    ixl_phy_type_string_ls(link_status.phy_type),
5332 	    link_status.link_speed,
5333 	    link_status.link_info,
5334 	    link_status.an_info,
5335 	    link_status.ext_info,
5336 	    link_status.loopback,
5337 	    link_status.max_frame_size,
5338 	    link_status.config,
5339 	    link_status.power_desc);
5340 
5341 	error = sbuf_finish(buf);
5342 	if (error)
5343 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5344 
5345 	sbuf_delete(buf);
5346 	return (error);
5347 }
5348 
5349 static int
5350 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5351 {
5352 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5353 	struct i40e_hw *hw = &pf->hw;
5354 	device_t dev = pf->dev;
5355 	enum i40e_status_code status;
5356 	struct i40e_aq_get_phy_abilities_resp abilities;
5357 	struct sbuf *buf;
5358 	int error = 0;
5359 
5360 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5361 	if (!buf) {
5362 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5363 		return (ENOMEM);
5364 	}
5365 
5366 	status = i40e_aq_get_phy_capabilities(hw,
5367 	    FALSE, FALSE, &abilities, NULL);
5368 	if (status) {
5369 		device_printf(dev,
5370 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5371 		    __func__, i40e_stat_str(hw, status),
5372 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5373 		sbuf_delete(buf);
5374 		return (EIO);
5375 	}
5376 
5377 	sbuf_printf(buf, "\n"
5378 	    "PHY Type : %08x",
5379 	    abilities.phy_type);
5380 
5381 	if (abilities.phy_type != 0) {
5382 		sbuf_printf(buf, "<");
5383 		for (int i = 0; i < 32; i++)
5384 			if ((1u << i) & abilities.phy_type)
5385 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5386 		sbuf_printf(buf, ">\n");
5387 	}
5388 
5389 	sbuf_printf(buf, "PHY Ext  : %02x",
5390 	    abilities.phy_type_ext);
5391 
5392 	if (abilities.phy_type_ext != 0) {
5393 		sbuf_printf(buf, "<");
5394 		for (int i = 0; i < 4; i++)
5395 			if ((1 << i) & abilities.phy_type_ext)
5396 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5397 		sbuf_printf(buf, ">");
5398 	}
5399 	sbuf_printf(buf, "\n");
5400 
5401 	sbuf_printf(buf,
5402 	    "Speed    : %02x\n"
5403 	    "Abilities: %02x\n"
5404 	    "EEE cap  : %04x\n"
5405 	    "EEER reg : %08x\n"
5406 	    "D3 Lpan  : %02x\n"
5407 	    "ID       : %02x %02x %02x %02x\n"
5408 	    "ModType  : %02x %02x %02x\n"
5409 	    "ModType E: %01x\n"
5410 	    "FEC Cfg  : %02x\n"
5411 	    "Ext CC   : %02x",
5412 	    abilities.link_speed,
5413 	    abilities.abilities, abilities.eee_capability,
5414 	    abilities.eeer_val, abilities.d3_lpan,
5415 	    abilities.phy_id[0], abilities.phy_id[1],
5416 	    abilities.phy_id[2], abilities.phy_id[3],
5417 	    abilities.module_type[0], abilities.module_type[1],
5418 	    abilities.module_type[2], abilities.phy_type_ext >> 5,
5419 	    abilities.phy_type_ext & 0x1F,
5420 	    abilities.ext_comp_code);
5421 
5422 	error = sbuf_finish(buf);
5423 	if (error)
5424 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5425 
5426 	sbuf_delete(buf);
5427 	return (error);
5428 }
5429 
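/*
 * Debug sysctl that prints the software MAC filter list of the PF's
 * VSI, one "MAC, vlan, flags" entry per line.
 */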
5430 static int
5431 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5432 {
5433 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5434 	struct ixl_vsi *vsi = &pf->vsi;
5435 	struct ixl_mac_filter *f;
5436 	char *buf, *buf_i;
5437 
5438 	int error = 0;
5439 	int ftl_len = 0;
5440 	int ftl_counter = 0;
5441 	int buf_len = 0;
5442 	int entry_len = 42;
5443 
5444 	SLIST_FOREACH(f, &vsi->ftl, next) {
5445 		ftl_len++;
5446 	}
5447 
5448 	if (ftl_len < 1) {
5449 		sysctl_handle_string(oidp, "(none)", 6, req);
5450 		return (0);
5451 	}
5452 
5453 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5454 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		device_printf(pf->dev, "Could not allocate output buffer.\n");
		return (ENOMEM);
	}
5455 
5456 	sprintf(buf_i++, "\n");
5457 	SLIST_FOREACH(f, &vsi->ftl, next) {
5458 		sprintf(buf_i,
5459 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5460 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5461 		buf_i += entry_len;
5462 		/* don't print '\n' for last entry */
5463 		if (++ftl_counter != ftl_len) {
5464 			sprintf(buf_i, "\n");
5465 			buf_i++;
5466 		}
5467 	}
5468 
5469 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5470 	if (error)
5471 		device_printf(pf->dev, "sysctl error: %d\n", error);
5472 	free(buf, M_DEVBUF);
5473 	return (error);
5474 }
5475 
5476 #define IXL_SW_RES_SIZE 0x14
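/*
 * Comparator for qsort(); orders switch resource allocation responses
 * by resource type so the hw_res_alloc sysctl below prints like
 * resources together.
 */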
5477 int
5478 ixl_res_alloc_cmp(const void *a, const void *b)
5479 {
5480 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5481 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5482 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5483 
5484 	return ((int)one->resource_type - (int)two->resource_type);
5485 }
5486 
5487 /*
5488  * Longest string length: 25
5489  */
5490 char *
5491 ixl_switch_res_type_string(u8 type)
5492 {
5493 	static char * ixl_switch_res_type_strings[0x14] = {
5494 		"VEB",
5495 		"VSI",
5496 		"Perfect Match MAC address",
5497 		"S-tag",
5498 		"(Reserved)",
5499 		"Multicast hash entry",
5500 		"Unicast hash entry",
5501 		"VLAN",
5502 		"VSI List entry",
5503 		"(Reserved)",
5504 		"VLAN Statistic Pool",
5505 		"Mirror Rule",
5506 		"Queue Set",
5507 		"Inner VLAN Forward filter",
5508 		"(Reserved)",
5509 		"Inner MAC",
5510 		"IP",
5511 		"GRE/VN1 Key",
5512 		"VN2 Key",
5513 		"Tunneling Port"
5514 	};
5515 
5516 	if (type < 0x14)
5517 		return ixl_switch_res_type_strings[type];
5518 	else
5519 		return "(Reserved)";
5520 }
5521 
5522 static int
5523 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5524 {
5525 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5526 	struct i40e_hw *hw = &pf->hw;
5527 	device_t dev = pf->dev;
5528 	struct sbuf *buf;
5529 	enum i40e_status_code status;
5530 	int error = 0;
5531 
5532 	u8 num_entries;
5533 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5534 
5535 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5536 	if (!buf) {
5537 		device_printf(dev, "Could not allocate sbuf for output.\n");
5538 		return (ENOMEM);
5539 	}
5540 
5541 	bzero(resp, sizeof(resp));
5542 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5543 				resp,
5544 				IXL_SW_RES_SIZE,
5545 				NULL);
5546 	if (status) {
5547 		device_printf(dev,
5548 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5549 		    __func__, i40e_stat_str(hw, status),
5550 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5551 		sbuf_delete(buf);
5552 		return (EIO);
5553 	}
5554 
5555 	/* Sort entries by type for display */
5556 	qsort(resp, num_entries,
5557 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5558 	    &ixl_res_alloc_cmp);
5559 
5560 	sbuf_cat(buf, "\n");
5561 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5562 	sbuf_printf(buf,
5563 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5564 	    "                          | (this)     | (all) | (this) | (all)       \n");
5565 	for (int i = 0; i < num_entries; i++) {
5566 		sbuf_printf(buf,
5567 		    "%25s | %10d   %5d   %6d   %12d",
5568 		    ixl_switch_res_type_string(resp[i].resource_type),
5569 		    resp[i].guaranteed,
5570 		    resp[i].total,
5571 		    resp[i].used,
5572 		    resp[i].total_unalloced);
5573 		if (i < num_entries - 1)
5574 			sbuf_cat(buf, "\n");
5575 	}
5576 
5577 	error = sbuf_finish(buf);
5578 	if (error)
5579 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5580 
5581 	sbuf_delete(buf);
5582 	return (error);
5583 }
5584 
5585 /*
5586 ** Caller must init and delete sbuf; this function will clear and
5587 ** finish it for caller.
5588 **
5589 ** XXX: Cannot use the SEID for this, since there is no longer a
5590 ** fixed mapping between SEID and element type.
5591 */
5592 char *
5593 ixl_switch_element_string(struct sbuf *s,
5594     struct i40e_aqc_switch_config_element_resp *element)
5595 {
5596 	sbuf_clear(s);
5597 
5598 	switch (element->element_type) {
5599 	case I40E_AQ_SW_ELEM_TYPE_MAC:
5600 		sbuf_printf(s, "MAC %3d", element->element_info);
5601 		break;
5602 	case I40E_AQ_SW_ELEM_TYPE_PF:
5603 		sbuf_printf(s, "PF  %3d", element->element_info);
5604 		break;
5605 	case I40E_AQ_SW_ELEM_TYPE_VF:
5606 		sbuf_printf(s, "VF  %3d", element->element_info);
5607 		break;
5608 	case I40E_AQ_SW_ELEM_TYPE_EMP:
5609 		sbuf_cat(s, "EMP");
5610 		break;
5611 	case I40E_AQ_SW_ELEM_TYPE_BMC:
5612 		sbuf_cat(s, "BMC");
5613 		break;
5614 	case I40E_AQ_SW_ELEM_TYPE_PV:
5615 		sbuf_cat(s, "PV");
5616 		break;
5617 	case I40E_AQ_SW_ELEM_TYPE_VEB:
5618 		sbuf_cat(s, "VEB");
5619 		break;
5620 	case I40E_AQ_SW_ELEM_TYPE_PA:
5621 		sbuf_cat(s, "PA");
5622 		break;
5623 	case I40E_AQ_SW_ELEM_TYPE_VSI:
5624 		sbuf_printf(s, "VSI %3d", element->element_info);
5625 		break;
5626 	default:
5627 		sbuf_cat(s, "?");
5628 		break;
5629 	}
5630 
5631 	sbuf_finish(s);
5632 	return sbuf_data(s);
5633 }
5634 
5635 static int
5636 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5637 {
5638 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5639 	struct i40e_hw *hw = &pf->hw;
5640 	device_t dev = pf->dev;
5641 	struct sbuf *buf;
5642 	struct sbuf *nmbuf;
5643 	enum i40e_status_code status;
5644 	int error = 0;
5645 	u16 next = 0;
5646 	u8 aq_buf[I40E_AQ_LARGE_BUF];
5647 
5648 	struct i40e_aqc_get_switch_config_resp *sw_config;
5649 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5650 
5651 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5652 	if (!buf) {
5653 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5654 		return (ENOMEM);
5655 	}
5656 
5657 	status = i40e_aq_get_switch_config(hw, sw_config,
5658 	    sizeof(aq_buf), &next, NULL);
5659 	if (status) {
5660 		device_printf(dev,
5661 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
5662 		    __func__, i40e_stat_str(hw, status),
5663 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5664 		sbuf_delete(buf);
5665 		return (EIO);
5666 	}
5667 	if (next)
5668 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5669 		    __func__, next);
5670 
5671 	nmbuf = sbuf_new_auto();
5672 	if (!nmbuf) {
5673 		device_printf(dev, "Could not allocate sbuf for name output.\n");
5674 		sbuf_delete(buf);
5675 		return (ENOMEM);
5676 	}
5677 
5678 	sbuf_cat(buf, "\n");
5679 	/* Assuming <= 255 elements in switch */
5680 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5681 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5682 	/* Exclude:
5683 	** Revision -- all elements are revision 1 for now
5684 	*/
5685 	sbuf_printf(buf,
5686 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5687 	    "                |          |          | (uplink)\n");
5688 	for (int i = 0; i < sw_config->header.num_reported; i++) {
5689 		/* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
5690 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5691 		sbuf_cat(buf, " ");
5692 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5693 		    &sw_config->element[i]));
5694 		sbuf_cat(buf, " | ");
5695 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5696 		sbuf_cat(buf, "   ");
5697 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5698 		sbuf_cat(buf, "   ");
5699 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5700 		if (i < sw_config->header.num_reported - 1)
5701 			sbuf_cat(buf, "\n");
5702 	}
5703 	sbuf_delete(nmbuf);
5704 
5705 	error = sbuf_finish(buf);
5706 	if (error)
5707 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5708 
5709 	sbuf_delete(buf);
5710 
5711 	return (error);
5712 }
5713 
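/*
 * Debug sysctl that dumps the RSS hash key. On X722 the key is
 * fetched with the get_rss_key admin queue command; on other MACs it
 * is read directly from the I40E_PFQF_HKEY registers.
 */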
5714 static int
5715 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5716 {
5717 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5718 	struct i40e_hw *hw = &pf->hw;
5719 	device_t dev = pf->dev;
5720 	struct sbuf *buf;
5721 	int error = 0;
5722 	enum i40e_status_code status;
5723 	u32 reg;
5724 
5725 	struct i40e_aqc_get_set_rss_key_data key_data;
5726 
5727 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5728 	if (!buf) {
5729 		device_printf(dev, "Could not allocate sbuf for output.\n");
5730 		return (ENOMEM);
5731 	}
5732 
5733 	sbuf_cat(buf, "\n");
5734 	if (hw->mac.type == I40E_MAC_X722) {
5735 		bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5736 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5737 		if (status)
5738 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5739 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5740 		sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
5741 	} else {
5742 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5743 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5744 			sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5745 		}
5746 	}
5747 
5748 	error = sbuf_finish(buf);
5749 	if (error)
5750 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5751 	sbuf_delete(buf);
5752 
5753 	return (error);
5754 }
5755 
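/*
 * Debug sysctl that dumps the RSS lookup table (LUT). On X722 the LUT
 * is fetched with the get_rss_lut admin queue command; on other MACs
 * it is read directly from the I40E_PFQF_HLUT registers.
 */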
5756 static int
5757 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5758 {
5759 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5760 	struct i40e_hw *hw = &pf->hw;
5761 	device_t dev = pf->dev;
5762 	struct sbuf *buf;
5763 	int error = 0;
5764 	enum i40e_status_code status;
5765 	u8 hlut[512];
5766 	u32 reg;
5767 
5768 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5769 	if (!buf) {
5770 		device_printf(dev, "Could not allocate sbuf for output.\n");
5771 		return (ENOMEM);
5772 	}
5773 
5774 	sbuf_cat(buf, "\n");
5775 	if (hw->mac.type == I40E_MAC_X722) {
5776 		bzero(hlut, sizeof(hlut));
5777 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5778 		if (status)
5779 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5780 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5781 		sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5782 	} else {
5783 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5784 			reg = rd32(hw, I40E_PFQF_HLUT(i));
5785 			sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5786 		}
5787 	}
5788 
5789 	error = sbuf_finish(buf);
5790 	if (error)
5791 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5792 	sbuf_delete(buf);
5793 
5794 	return (error);
5795 }
5796 
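/*
 * Debug sysctl that reports the RSS hash enable (HENA) bits as one
 * 64-bit value assembled from the two 32-bit I40E_PFQF_HENA registers.
 * Note that sysctl_handle_long() truncates the value to 32 bits on
 * ILP32 platforms.
 */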
5797 static int
5798 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
5799 {
5800 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5801 	struct i40e_hw *hw = &pf->hw;
5802 	u64 hena;
5803 
5804 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
5805 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
5806 
5807 	return sysctl_handle_long(oidp, NULL, hena, req);
5808 }
5809 
5810 /*
5811  * Sysctl to disable firmware's link management
5812  *
5813  * 1 - Disable link management on this port
5814  * 0 - Re-enable link management
5815  *
5816  * On normal NVMs, firmware manages link by default.
5817  */
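/*
 * Example: writing 1 to this sysctl sets bit 4 of the set_phy_debug
 * command (the "disable link firmware" flag, as encoded below), while
 * writing 0 clears it and hands link management back to firmware.
 * The sysctl OID itself is attached elsewhere in the driver.
 */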
5818 static int
5819 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
5820 {
5821 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5822 	struct i40e_hw *hw = &pf->hw;
5823 	device_t dev = pf->dev;
5824 	int requested_mode = -1;
5825 	enum i40e_status_code status = 0;
5826 	int error = 0;
5827 
5828 	/* Read in new mode */
5829 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
5830 	if ((error) || (req->newptr == NULL))
5831 		return (error);
5832 	/* Check for sane value */
5833 	if (requested_mode < 0 || requested_mode > 1) {
5834 		device_printf(dev, "Valid modes are 0 or 1\n");
5835 		return (EINVAL);
5836 	}
5837 
5838 	/* Set new mode */
5839 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
5840 	if (status) {
5841 		device_printf(dev,
5842 		    "%s: Error setting new phy debug mode %s,"
5843 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
5844 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5845 		return (EIO);
5846 	}
5847 
5848 	return (0);
5849 }
5850 
5851 /*
5852  * Sysctl to read a byte from I2C bus.
5853  *
5854  * Input: 32-bit value:
5855  * 	bits 0-7:   device address (0xA0 or 0xA2)
5856  * 	bits 8-15:  offset (0-255)
5857  *	bits 16-31: unused
5858  * Output: 8-bit value read
5859  */
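/*
 * Example: to read offset 0x60 of the module EEPROM at device address
 * 0xA0, write the packed value (0x60 << 8) | 0xA0 = 0x60A0 to this
 * sysctl; the byte read is printed to the console via device_printf(),
 * not returned through the sysctl itself.
 */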
5860 static int
5861 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
5862 {
5863 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5864 	device_t dev = pf->dev;
5865 	int input = -1, error = 0;
5866 
5869 	u8 dev_addr, offset, output;
5870 
5871 	/* Read in I2C read parameters */
5872 	error = sysctl_handle_int(oidp, &input, 0, req);
5873 	if ((error) || (req->newptr == NULL))
5874 		return (error);
5875 	/* Validate device address */
5876 	dev_addr = input & 0xFF;
5877 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5878 		return (EINVAL);
5879 	}
5880 	offset = (input >> 8) & 0xFF;
5881 
5882 	error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
5883 	if (error)
5884 		return (error);
5885 
5886 	device_printf(dev, "%02X\n", output);
5887 	return (0);
5888 }
5889 
5890 /*
5891  * Sysctl to write a byte to the I2C bus.
5892  *
5893  * Input: 32-bit value:
5894  * 	bits 0-7:   device address (0xA0 or 0xA2)
5895  * 	bits 8-15:  offset (0-255)
5896  *	bits 16-23: value to write
5897  *	bits 24-31: unused
5898  * Output: 8-bit value written
5899  */
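/*
 * Example: to write the value 0x55 to offset 0x7F of the device at
 * address 0xA2, use the packed value
 * (0x55 << 16) | (0x7F << 8) | 0xA2 = 0x557FA2.
 */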
5900 static int
5901 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
5902 {
5903 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5904 	device_t dev = pf->dev;
5905 	int input = -1, error = 0;
5906 
5907 	u8 dev_addr, offset, value;
5908 
5909 	/* Read in I2C write parameters */
5910 	error = sysctl_handle_int(oidp, &input, 0, req);
5911 	if ((error) || (req->newptr == NULL))
5912 		return (error);
5913 	/* Validate device address */
5914 	dev_addr = input & 0xFF;
5915 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5916 		return (EINVAL);
5917 	}
5918 	offset = (input >> 8) & 0xFF;
5919 	value = (input >> 16) & 0xFF;
5920 
5921 	error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
5922 	if (error)
5923 		return (error);
5924 
5925 	device_printf(dev, "%02X written\n", value);
5926 	return (0);
5927 }
5928 
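/*
 * Reads the current PHY abilities and reports in '*is_set' whether the
 * FEC bits in 'bit_pos' (a bit mask, e.g. I40E_AQ_SET_FEC_ABILITY_KR)
 * are set in phy_type_ext, whose low bits carry the current FEC
 * configuration in this AQ revision.
 */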
5929 static int
5930 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5931     u8 bit_pos, int *is_set)
5932 {
5933 	device_t dev = pf->dev;
5934 	struct i40e_hw *hw = &pf->hw;
5935 	enum i40e_status_code status;
5936 
5937 	status = i40e_aq_get_phy_capabilities(hw,
5938 	    FALSE, FALSE, abilities, NULL);
5939 	if (status) {
5940 		device_printf(dev,
5941 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5942 		    __func__, i40e_stat_str(hw, status),
5943 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5944 		return (EIO);
5945 	}
5946 
5947 	*is_set = !!(abilities->phy_type_ext & bit_pos);
5948 	return (0);
5949 }
5950 
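/*
 * Sets or clears the FEC bits given by 'bit_pos' in the PHY
 * configuration. The remaining set_phy_config fields are copied from
 * the current abilities so nothing else changes, and the command is
 * only sent when the FEC bits actually differ.
 */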
5951 static int
5952 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5953     u8 bit_pos, int set)
5954 {
5955 	device_t dev = pf->dev;
5956 	struct i40e_hw *hw = &pf->hw;
5957 	struct i40e_aq_set_phy_config config;
5958 	enum i40e_status_code status;
5959 
5960 	/* Set new PHY config */
5961 	memset(&config, 0, sizeof(config));
5962 	config.fec_config = abilities->phy_type_ext & ~(bit_pos);
5963 	if (set)
5964 		config.fec_config |= bit_pos;
5965 	if (config.fec_config != abilities->phy_type_ext) {
5966 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5967 		config.phy_type = abilities->phy_type;
5968 		config.phy_type_ext = abilities->phy_type_ext;
5969 		config.link_speed = abilities->link_speed;
5970 		config.eee_capability = abilities->eee_capability;
5971 		config.eeer = abilities->eeer_val;
5972 		config.low_power_ctrl = abilities->d3_lpan;
5973 		status = i40e_aq_set_phy_config(hw, &config, NULL);
5974 
5975 		if (status) {
5976 			device_printf(dev,
5977 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
5978 			    __func__, i40e_stat_str(hw, status),
5979 			    i40e_aq_str(hw, hw->aq.asq_last_status));
5980 			return (EIO);
5981 		}
5982 	}
5983 
5984 	return (0);
5985 }
5986 
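/*
 * Sysctl handlers for the FC (Fire Code/BASE-R, KR) and RS
 * (Reed-Solomon) FEC ability and request bits, plus automatic FEC
 * selection. Each follows the same read-modify-write pattern: fetch
 * the current bit with ixl_get_fec_config(), let sysctl_handle_int()
 * report it or accept a new value, then apply it with
 * ixl_set_fec_config(). Any non-zero write enables the bit.
 */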
5987 static int
5988 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
5989 {
5990 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5991 	int mode, error = 0;
5992 
5993 	struct i40e_aq_get_phy_abilities_resp abilities;
5994 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
5995 	if (error)
5996 		return (error);
5997 	/* Read in new mode */
5998 	error = sysctl_handle_int(oidp, &mode, 0, req);
5999 	if ((error) || (req->newptr == NULL))
6000 		return (error);
6001 
6002 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6003 }
6004 
6005 static int
6006 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6007 {
6008 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6009 	int mode, error = 0;
6010 
6011 	struct i40e_aq_get_phy_abilities_resp abilities;
6012 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
6013 	if (error)
6014 		return (error);
6015 	/* Read in new mode */
6016 	error = sysctl_handle_int(oidp, &mode, 0, req);
6017 	if ((error) || (req->newptr == NULL))
6018 		return (error);
6019 
6020 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6021 }
6022 
6023 static int
6024 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6025 {
6026 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6027 	int mode, error = 0;
6028 
6029 	struct i40e_aq_get_phy_abilities_resp abilities;
6030 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
6031 	if (error)
6032 		return (error);
6033 	/* Read in new mode */
6034 	error = sysctl_handle_int(oidp, &mode, 0, req);
6035 	if ((error) || (req->newptr == NULL))
6036 		return (error);
6037 
6038 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6039 }
6040 
6041 static int
6042 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6043 {
6044 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6045 	int mode, error = 0;
6046 
6047 	struct i40e_aq_get_phy_abilities_resp abilities;
6048 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
6049 	if (error)
6050 		return (error);
6051 	/* Read in new mode */
6052 	error = sysctl_handle_int(oidp, &mode, 0, req);
6053 	if ((error) || (req->newptr == NULL))
6054 		return (error);
6055 
6056 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6057 }
6058 
6059 static int
6060 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6061 {
6062 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6063 	int mode, error = 0;
6064 
6065 	struct i40e_aq_get_phy_abilities_resp abilities;
6066 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
6067 	if (error)
6068 		return (error);
6069 	/* Read in new mode */
6070 	error = sysctl_handle_int(oidp, &mode, 0, req);
6071 	if ((error) || (req->newptr == NULL))
6072 		return (error);
6073 
6074 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
6075 }
6076 
6077