/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

/* Sysctls */
static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}
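
/*
 * Example of the resulting string (illustrative, made-up values):
 *
 *	fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.262.0
 */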

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
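
/*
 * Illustrative userland usage (a sketch; the exact sysctl OIDs are an
 * assumption based on the ixl_sysctl_pf_tx_itr/ixl_sysctl_pf_rx_itr
 * handlers in this file). ITR values are in 2 usec units, so:
 *
 *	# sysctl dev.ixl.0.rx_itr=62	(~124 usec, ~8K interrupts/sec)
 *	# sysctl dev.ixl.0.tx_itr=122	(~244 usec, ~4K interrupts/sec)
 */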

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case 0: {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == 1 &&
		    hw->aq.api_min_ver >= 7) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case 3:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

#if 0
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		     error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}


	/* Re-enable admin queue interrupt */
	if (pf->msix > 1) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

err_out:
	return (error);
#endif
	ixl_rebuild_hw_structs_after_reset(pf);

	/* The PF reset should have cleared any critical errors */
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= IXL_ICR0_CRIT_ERR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

err_out:
	return (error);
}

/*
 * TODO: Make sure this properly handles admin queue / single rx queue intr
 */
int
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;

	// pf->admin_irq++
	++que->irqs;

// TODO: Check against proper field
#if 0
	/* Clear PBA at start of ISR if using legacy interrupts */
	if (pf->msix == 0)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
#endif

	icr0 = rd32(hw, I40E_PFINT_ICR0);


#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	// TODO: Is intr0 enabled somewhere else?
	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}


/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);
	// ixl_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}


/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 *
	 * TODO: I think at least ECC error requires a GLOBR, not PFR
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	// TODO: Linux driver never re-enables this interrupt once it has been detected
	// Then what is supposed to happen? A PF reset? Should it never happen?
	// TODO: Parse out this error into something human readable
	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));

	return (1);
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	SLIST_FOREACH(f, &vsi->ftl, next)
		if ((f->flags & IXL_FILTER_USED) &&
		    (f->flags & IXL_FILTER_MC) &&
		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	// TODO: See if max is really necessary
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
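
/*
 * Worked example of the chain built above for queue pair 0: MSI-X vector 1
 * owns LNKLSTN(0), whose FIRSTQ fields point at RX queue 0; RQCTL(0) chains
 * on to TX queue 0; and TQCTL(0) ends the list with IXL_QUEUE_EOL, i.e.
 * vector 1 -> RX queue 0 -> TX queue 0 -> EOL.
 */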

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

// TODO: Fix
#if 0
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;
	/* XXX: Assuming only 1 queue in single interrupt mode */
#endif
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pf->pci_mem), pf->pci_mem);
}

void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		/* TODO: Maybe just retry this in a task... */
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	if (link_speeds & I40E_LINK_SPEED_100MB)
		return IF_Mbps(100);
	else
		/* Minimum supported link speed */
		return IF_Mbps(100);
}
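
/*
 * Example: a bitmap of I40E_LINK_SPEED_25GB | I40E_LINK_SPEED_10GB yields
 * IF_Gbps(25), since the highest set speed bit wins.
 */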

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  			 like the number of descriptors, buffer size,
 *			 plus we init the rings thru this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int 			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
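	/*
	 * Worked example: with first_qidx = 0 and num_allocated = 8,
	 * tc_queues = fls(8) - 1 = 3, so TC0 starts at queue 0 and is
	 * sized as 2^3 = 8 queues.
	 */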

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context  */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context  */
		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
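			/*
			 * Worked example: with a previous ITR of 100 and a
			 * raw target of 40, the smoothed value is
			 * (10 * 40 * 100) / ((9 * 40) + 100) = 40000 / 460
			 * = 86, so the ITR drifts gradually toward the
			 * target instead of jumping to it.
			 */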
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}


/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;


	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
1479 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1480 			vsi->tx_itr_setting = pf->tx_itr;
1481 		/* Update the hardware if needed */
1482 		if (txr->itr != vsi->tx_itr_setting) {
1483 			txr->itr = vsi->tx_itr_setting;
1484 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1485 			    txr->me), txr->itr);
1486 		}
1487 	}
1488 	txr->bytes = 0;
1489 	txr->packets = 0;
1490 	return;
1491 }
1492 
1493 #ifdef IXL_DEBUG
1494 /**
1495  * ixl_sysctl_qtx_tail_handler
1496  * Retrieves I40E_QTX_TAIL value from hardware
1497  * for a sysctl.
1498  */
1499 int
1500 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1501 {
1502 	struct ixl_tx_queue *tx_que;
1503 	int error;
1504 	u32 val;
1505 
1506 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1507 	if (!tx_que) return 0;
1508 
1509 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1510 	error = sysctl_handle_int(oidp, &val, 0, req);
1511 	if (error || !req->newptr)
1512 		return error;
1513 	return (0);
1514 }
1515 
1516 /**
1517  * ixl_sysctl_qrx_tail_handler
1518  * Retrieves I40E_QRX_TAIL value from hardware
1519  * for a sysctl.
1520  */
1521 int
1522 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1523 {
1524 	struct ixl_rx_queue *rx_que;
1525 	int error;
1526 	u32 val;
1527 
1528 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1529 	if (!rx_que) return 0;
1530 
1531 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1532 	error = sysctl_handle_int(oidp, &val, 0, req);
1533 	if (error || !req->newptr)
1534 		return error;
1535 	return (0);
1536 }
1537 #endif
1538 
1539 /*
1540  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1541  * Writes to the ITR registers immediately.
1542  */
1543 static int
1544 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1545 {
1546 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1547 	device_t dev = pf->dev;
1548 	int error = 0;
1549 	int requested_tx_itr;
1550 
1551 	requested_tx_itr = pf->tx_itr;
1552 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1553 	if ((error) || (req->newptr == NULL))
1554 		return (error);
1555 	if (pf->dynamic_tx_itr) {
1556 		device_printf(dev,
1557 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1558 		    return (EINVAL);
1559 	}
1560 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1561 		device_printf(dev,
1562 		    "Invalid TX itr value; value must be between 0 and %d\n",
1563 		        IXL_MAX_ITR);
1564 		return (EINVAL);
1565 	}
1566 
1567 	pf->tx_itr = requested_tx_itr;
1568 	ixl_configure_tx_itr(pf);
1569 
1570 	return (error);
1571 }
1572 
1573 /*
1574  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1575  * Writes to the ITR registers immediately.
1576  */
1577 static int
1578 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1579 {
1580 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1581 	device_t dev = pf->dev;
1582 	int error = 0;
1583 	int requested_rx_itr;
1584 
1585 	requested_rx_itr = pf->rx_itr;
1586 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1587 	if ((error) || (req->newptr == NULL))
1588 		return (error);
1589 	if (pf->dynamic_rx_itr) {
1590 		device_printf(dev,
1591 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1592 		    return (EINVAL);
1593 	}
1594 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1595 		device_printf(dev,
1596 		    "Invalid RX itr value; value must be between 0 and %d\n",
1597 		        IXL_MAX_ITR);
1598 		return (EINVAL);
1599 	}
1600 
1601 	pf->rx_itr = requested_rx_itr;
1602 	ixl_configure_rx_itr(pf);
1603 
1604 	return (error);
1605 }
1606 
1607 void
1608 ixl_add_hw_stats(struct ixl_pf *pf)
1609 {
1610 	struct ixl_vsi *vsi = &pf->vsi;
1611 	device_t dev = iflib_get_dev(vsi->ctx);
1612 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1613 
1614 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1615 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1616 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1617 
1618 	/* Driver statistics */
1619 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1620 			CTLFLAG_RD, &pf->admin_irq,
1621 			"Admin Queue IRQs received");
1622 
1623 	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1624 
1625 	ixl_add_queues_sysctls(dev, vsi);
1626 
1627 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1628 }
1629 
1630 void
1631 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1632 	struct sysctl_oid_list *child,
1633 	struct i40e_hw_port_stats *stats)
1634 {
1635 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1636 				    CTLFLAG_RD, NULL, "Mac Statistics");
1637 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1638 
1639 	struct i40e_eth_stats *eth_stats = &stats->eth;
1640 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1641 
1642 	struct ixl_sysctl_info ctls[] =
1643 	{
1644 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1645 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1646 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1647 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1648 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1649 		/* Packet Reception Stats */
1650 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1651 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1652 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1653 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1654 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1655 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1656 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1657 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1658 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1659 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1660 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1661 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1662 		/* Packet Transmission Stats */
1663 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1664 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1665 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1666 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1667 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1668 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1669 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1670 		/* Flow control */
1671 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1672 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1673 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1674 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1675 		/* End */
1676 		{0,0,0}
1677 	};
1678 
1679 	struct ixl_sysctl_info *entry = ctls;
1680 	while (entry->stat != 0)
1681 	{
1682 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1683 				CTLFLAG_RD, entry->stat,
1684 				entry->description);
1685 		entry++;
1686 	}
1687 }
1688 
1689 void
1690 ixl_set_rss_key(struct ixl_pf *pf)
1691 {
1692 	struct i40e_hw *hw = &pf->hw;
1693 	struct ixl_vsi *vsi = &pf->vsi;
1694 	device_t	dev = pf->dev;
1695 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1696 	enum i40e_status_code status;
1697 
1698 #ifdef RSS
1699 	/* Fetch the configured RSS key */
1700 	rss_getkey((uint8_t *)&rss_seed);
1701 #else
1702 	ixl_get_default_rss_key(rss_seed);
1703 #endif
1704 	/* Fill out hash function seed */
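	/*
	 * The X722 takes its RSS key through an admin queue command,
	 * while other MACs program the PFQF_HKEY registers directly.
	 */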
1705 	if (hw->mac.type == I40E_MAC_X722) {
1706 		struct i40e_aqc_get_set_rss_key_data key_data;
1707 		bcopy(rss_seed, &key_data, sizeof(key_data));
1708 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1709 		if (status)
1710 			device_printf(dev,
1711 			    "i40e_aq_set_rss_key status %s, error %s\n",
1712 			    i40e_stat_str(hw, status),
1713 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1714 	} else {
1715 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1716 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1717 	}
1718 }
1719 
1720 /*
1721  * Configure enabled PCTYPES for RSS.
1722  */
1723 void
1724 ixl_set_rss_pctypes(struct ixl_pf *pf)
1725 {
1726 	struct i40e_hw *hw = &pf->hw;
1727 	u64		set_hena = 0, hena;
1728 
1729 #ifdef RSS
1730 	u32		rss_hash_config;
1731 
1732 	rss_hash_config = rss_gethashconfig();
1733 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1734 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1735 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1736 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1737 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1738 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1739 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1740 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1741 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1742 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1743 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1744 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1745 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1746 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1747 #else
1748 	if (hw->mac.type == I40E_MAC_X722)
1749 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1750 	else
1751 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1752 #endif
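	/*
	 * HENA is a 64-bit PCTYPE enable mask split across two 32-bit
	 * registers; OR the requested types into whatever is already set.
	 */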
1753 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1754 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1755 	hena |= set_hena;
1756 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1757 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1759 }
1760 
1761 void
1762 ixl_set_rss_hlut(struct ixl_pf *pf)
1763 {
1764 	struct i40e_hw	*hw = &pf->hw;
1765 	struct ixl_vsi *vsi = &pf->vsi;
1766 	device_t	dev = iflib_get_dev(vsi->ctx);
1767 	int		i, que_id;
1768 	int		lut_entry_width;
1769 	u32		lut = 0;
1770 	enum i40e_status_code status;
1771 
1772 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1773 
1774 	/* Populate the LUT with the max number of queues in round-robin fashion */
1775 	u8 hlut_buf[512];
1776 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1777 #ifdef RSS
1778 		/*
1779 		 * Fetch the RSS bucket id for the given indirection entry.
1780 		 * Cap it at the number of configured buckets (which is
1781 		 * num_queues.)
1782 		 */
1783 		que_id = rss_get_indirection_to_bucket(i);
1784 		que_id = que_id % vsi->num_rx_queues;
1785 #else
1786 		que_id = i % vsi->num_rx_queues;
1787 #endif
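		/* Mask the queue id down to the LUT entry width the HW reports. */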
1788 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1789 		hlut_buf[i] = lut;
1790 	}
1791 
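	/*
	 * As with the RSS key, the X722 loads its LUT through an admin
	 * queue command; other MACs write the HLUT registers directly,
	 * packing four 8-bit entries per 32-bit register (hence the
	 * table_size >> 2 loop).
	 */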
1792 	if (hw->mac.type == I40E_MAC_X722) {
1793 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1794 		if (status)
1795 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1796 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1797 	} else {
1798 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1799 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1800 		ixl_flush(hw);
1801 	}
1802 }
1803 
1804 /*
1805 ** Setup the PF's RSS parameters.
1806 */
1807 void
1808 ixl_config_rss(struct ixl_pf *pf)
1809 {
1810 	ixl_set_rss_key(pf);
1811 	ixl_set_rss_pctypes(pf);
1812 	ixl_set_rss_hlut(pf);
1813 }
1814 
1815 /*
1816 ** This routine updates VLAN filters. Called by init,
1817 ** it scans the filter table and then updates the HW
1818 ** after a soft reset.
1819 */
1820 void
1821 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1822 {
1823 	struct ixl_mac_filter	*f;
1824 	int			cnt = 0, flags;
1825 
1826 	if (vsi->num_vlans == 0)
1827 		return;
1828 	/*
1829 	** Scan the filter list for vlan entries,
1830 	** mark them for addition and then call
1831 	** for the AQ update.
1832 	*/
1833 	SLIST_FOREACH(f, &vsi->ftl, next) {
1834 		if (f->flags & IXL_FILTER_VLAN) {
1835 			f->flags |=
1836 			    (IXL_FILTER_ADD |
1837 			    IXL_FILTER_USED);
1838 			cnt++;
1839 		}
1840 	}
1841 	if (cnt == 0) {
1842 		device_printf(vsi->dev, "%s: no VLAN filters found!\n", __func__);
1843 		return;
1844 	}
1845 	flags = IXL_FILTER_VLAN;
1846 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1847 	ixl_add_hw_filters(vsi, flags, cnt);
1848 }
1849 
1850 /*
1851  * In some firmware versions there is a default MAC/VLAN filter
1852  * configured which interferes with the filters managed by the driver.
1853  * Make sure it's removed.
1854  */
1855 void
1856 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1857 {
1858 	struct i40e_aqc_remove_macvlan_element_data e;
1859 
1860 	bzero(&e, sizeof(e));
1861 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1862 	e.vlan_tag = 0;
1863 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1864 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1865 
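	/*
	 * Issue the removal a second time with IGNORE_VLAN set, in case
	 * the firmware installed its default filter without a VLAN match.
	 */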
1866 	bzero(&e, sizeof(e));
1867 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1868 	e.vlan_tag = 0;
1869 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1870 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1871 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1872 }
1873 
1874 /*
1875 ** Initialize filter list and add filters that the hardware
1876 ** needs to know about.
1877 **
1878 ** Requires VSI's filter list & seid to be set before calling.
1879 */
1880 void
1881 ixl_init_filters(struct ixl_vsi *vsi)
1882 {
1883 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1884 
1885 	/* Initialize mac filter list for VSI */
1886 	SLIST_INIT(&vsi->ftl);
1887 
1888 	/* Receive broadcast Ethernet frames */
1889 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1890 
1891 	ixl_del_default_hw_filters(vsi);
1892 
1893 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1894 	/*
1895 	 * Prevent Tx flow control frames from being sent out by
1896 	 * non-firmware transmitters.
1897 	 * This affects every VSI in the PF.
1898 	 */
1899 	if (pf->enable_tx_fc_filter)
1900 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1901 }
1902 
1903 /*
1904 ** This routine adds multicast filters.
1905 */
1906 void
1907 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1908 {
1909 	struct ixl_mac_filter *f;
1910 
1911 	/* Does one already exist */
1912 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1913 	if (f != NULL)
1914 		return;
1915 
1916 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1917 	if (f != NULL)
1918 		f->flags |= IXL_FILTER_MC;
1919 	else
1920 		device_printf(vsi->dev, "WARNING: no filter available!\n");
1921 }
1922 
1923 void
1924 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1925 {
1926 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1927 }
1928 
1929 /*
1930  * This routine adds a MAC/VLAN filter to the software filter
1931  * list, then adds that new filter to the HW if it doesn't already
1932  * exist in the SW filter list.
1933  */
1934 void
1935 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1936 {
1937 	struct ixl_mac_filter	*f, *tmp;
1938 	struct ixl_pf		*pf;
1939 	device_t		dev;
1940 
1941 	DEBUGOUT("ixl_add_filter: begin");
1942 
1943 	pf = vsi->back;
1944 	dev = pf->dev;
1945 
1946 	/* Does one already exist */
1947 	f = ixl_find_filter(vsi, macaddr, vlan);
1948 	if (f != NULL)
1949 		return;
1950 	/*
1951 	** If this is the first VLAN being registered, we need
1952 	** to remove the ANY filter that indicates we are not
1953 	** in a VLAN, and replace it with a VLAN-0 filter.
1954 	*/
1955 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1956 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1957 		if (tmp != NULL) {
1958 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 			ixl_add_filter(vsi, macaddr, 0);
1960 		}
1961 	}
1962 
1963 	f = ixl_new_filter(vsi, macaddr, vlan);
1964 	if (f == NULL) {
1965 		device_printf(dev, "WARNING: no filter available!!\n");
1966 		return;
1967 	}
1968 	if (f->vlan != IXL_VLAN_ANY)
1969 		f->flags |= IXL_FILTER_VLAN;
1970 	else
1971 		vsi->num_macs++;
1972 
1973 	f->flags |= IXL_FILTER_USED;
1974 	ixl_add_hw_filters(vsi, f->flags, 1);
1975 }
1976 
1977 void
1978 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1979 {
1980 	struct ixl_mac_filter *f;
1981 
1982 	f = ixl_find_filter(vsi, macaddr, vlan);
1983 	if (f == NULL)
1984 		return;
1985 
	/* Cache the fields ixl_del_hw_filters() will free with the entry. */
	s16 f_vlan = f->vlan;
	u16 f_flags = f->flags;

1986 	f->flags |= IXL_FILTER_DEL;
1987 	ixl_del_hw_filters(vsi, 1);
1988 	if (f_vlan == IXL_VLAN_ANY && (f_flags & IXL_FILTER_VLAN) == 0)
1989 		vsi->num_macs--;
1990 
1991 	/* Check if this is the last vlan removal */
1992 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1993 		/* Switch back to a non-vlan filter */
1994 		ixl_del_filter(vsi, macaddr, 0);
1995 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1996 	}
1997 	return;
1998 }
1999 
2000 /*
2001 ** Find the filter with both matching mac addr and vlan id
2002 */
2003 struct ixl_mac_filter *
2004 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2005 {
2006 	struct ixl_mac_filter	*f;
2007 
2008 	SLIST_FOREACH(f, &vsi->ftl, next) {
2009 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2010 		    && (f->vlan == vlan)) {
2011 			return (f);
2012 		}
2013 	}
2014 
2015 	return (NULL);
2016 }
2017 
2018 /*
2019 ** This routine takes additions to the vsi filter
2020 ** table and creates an Admin Queue call to create
2021 ** the filters in the hardware.
2022 */
2023 void
2024 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2025 {
2026 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2027 	struct ixl_mac_filter	*f;
2028 	struct ixl_pf		*pf;
2029 	struct i40e_hw		*hw;
2030 	device_t		dev;
2031 	enum i40e_status_code	status;
2032 	int			j = 0;
2033 
2034 	pf = vsi->back;
2035 	dev = vsi->dev;
2036 	hw = &pf->hw;
2037 
2038 	if (cnt < 1) {
2039 		ixl_dbg_info(pf, "%s: cnt < 1\n", __func__);
2040 		return;
2041 	}
2042 
2043 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2044 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2045 	if (a == NULL) {
2046 		device_printf(dev, "add_hw_filters failed to get memory\n");
2047 		return;
2048 	}
2049 
2050 	/*
2051 	** Scan the filter list; each time we find a matching
2052 	** filter, add it to the admin queue array and clear
2053 	** its add bit.
2054 	*/
2055 	SLIST_FOREACH(f, &vsi->ftl, next) {
2056 		if ((f->flags & flags) == flags) {
2057 			b = &a[j]; // a pox on fvl long names :)
2058 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2059 			if (f->vlan == IXL_VLAN_ANY) {
2060 				b->vlan_tag = 0;
2061 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2062 			} else {
2063 				b->vlan_tag = f->vlan;
2064 				b->flags = 0;
2065 			}
2066 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2067 			f->flags &= ~IXL_FILTER_ADD;
2068 			j++;
2069 
2070 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2071 			    MAC_FORMAT_ARGS(f->macaddr));
2072 		}
2073 		if (j == cnt)
2074 			break;
2075 	}
2076 	if (j > 0) {
2077 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2078 		if (status)
2079 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2080 			    "error %s\n", i40e_stat_str(hw, status),
2081 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2082 		else
2083 			vsi->hw_filters_add += j;
2084 	}
2085 	free(a, M_DEVBUF);
2086 	return;
2087 }
2088 
2089 /*
2090 ** This routine takes removals in the vsi filter
2091 ** table and creates an Admin Queue call to delete
2092 ** the filters in the hardware.
2093 */
2094 void
2095 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2096 {
2097 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2098 	struct ixl_pf		*pf;
2099 	struct i40e_hw		*hw;
2100 	device_t		dev;
2101 	struct ixl_mac_filter	*f, *f_temp;
2102 	enum i40e_status_code	status;
2103 	int			j = 0;
2104 
2105 	pf = vsi->back;
2106 	hw = &pf->hw;
2107 	dev = vsi->dev;
2108 
2109 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2110 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2111 	if (d == NULL) {
2112 		device_printf(dev, "%s: failed to get memory\n", __func__);
2113 		return;
2114 	}
2115 
2116 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2117 		if (f->flags & IXL_FILTER_DEL) {
2118 			e = &d[j]; // a pox on fvl long names :)
2119 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2120 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2121 			if (f->vlan == IXL_VLAN_ANY) {
2122 				e->vlan_tag = 0;
2123 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2124 			} else {
2125 				e->vlan_tag = f->vlan;
2126 			}
2127 
2128 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2129 			    MAC_FORMAT_ARGS(f->macaddr));
2130 
2131 			/* delete entry from vsi list */
2132 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2133 			free(f, M_DEVBUF);
2134 			j++;
2135 		}
2136 		if (j == cnt)
2137 			break;
2138 	}
2139 	if (j > 0) {
2140 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2141 		if (status) {
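			/*
			 * On failure, each element's error_code holds its
			 * individual result; count the entries that were
			 * still removed successfully.
			 */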
2142 			int sc = 0;
2143 			for (int i = 0; i < j; i++)
2144 				sc += (!d[i].error_code);
2145 			vsi->hw_filters_del += sc;
2146 			device_printf(dev,
2147 			    "Failed to remove %d/%d filters, error %s\n",
2148 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2149 		} else
2150 			vsi->hw_filters_del += j;
2151 	}
2152 	free(d, M_DEVBUF);
2153 	return;
2154 }
2155 
2156 int
2157 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2158 {
2159 	struct i40e_hw	*hw = &pf->hw;
2160 	int		error = 0;
2161 	u32		reg;
2162 	u16		pf_qidx;
2163 
2164 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2165 
2166 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2167 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2168 	    pf_qidx, vsi_qidx);
2169 
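	/* Notify the HW before changing this TX queue's enable state. */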
2170 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2171 
2172 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2173 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2174 	    I40E_QTX_ENA_QENA_STAT_MASK;
2175 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2176 	/* Verify the enable took */
2177 	for (int j = 0; j < 10; j++) {
2178 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2179 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2180 			break;
2181 		i40e_usec_delay(10);
2182 	}
2183 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2184 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2185 		    pf_qidx);
2186 		error = ETIMEDOUT;
2187 	}
2188 
2189 	return (error);
2190 }
2191 
2192 int
2193 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2194 {
2195 	struct i40e_hw	*hw = &pf->hw;
2196 	int		error = 0;
2197 	u32		reg;
2198 	u16		pf_qidx;
2199 
2200 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2201 
2202 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2203 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2204 	    pf_qidx, vsi_qidx);
2205 
2206 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2207 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2208 	    I40E_QRX_ENA_QENA_STAT_MASK;
2209 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2210 	/* Verify the enable took */
2211 	for (int j = 0; j < 10; j++) {
2212 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2213 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2214 			break;
2215 		i40e_usec_delay(10);
2216 	}
2217 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2218 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2219 		    pf_qidx);
2220 		error = ETIMEDOUT;
2221 	}
2222 
2223 	return (error);
2224 }
2225 
2226 int
2227 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2228 {
2229 	int error = 0;
2230 
2231 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2232 	/* Called function already prints error message */
2233 	if (error)
2234 		return (error);
2235 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2236 	return (error);
2237 }
2238 
2239 /* For PF VSI only */
2240 int
2241 ixl_enable_rings(struct ixl_vsi *vsi)
2242 {
2243 	struct ixl_pf	*pf = vsi->back;
2244 	int		error = 0, err;
2245 
	/* Try every ring; report the first failure, if any. */
2246 	for (int i = 0; i < vsi->num_tx_queues; i++) {
2247 		err = ixl_enable_tx_ring(pf, &pf->qtag, i);
		if (err != 0 && error == 0)
			error = err;
	}
2248 
2249 	for (int i = 0; i < vsi->num_rx_queues; i++) {
2250 		err = ixl_enable_rx_ring(pf, &pf->qtag, i);
		if (err != 0 && error == 0)
			error = err;
	}
2251 
2252 	return (error);
2253 }
2254 
2255 /*
2256  * Returns an error if the ring fails to stop within the timeout.
2257  */
2258 int
2259 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2260 {
2261 	struct i40e_hw	*hw = &pf->hw;
2262 	int		error = 0;
2263 	u32		reg;
2264 	u16		pf_qidx;
2265 
2266 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2267 
2268 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2269 	i40e_usec_delay(500);
2270 
2271 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2272 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2273 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2274 	/* Verify the disable took */
2275 	for (int j = 0; j < 10; j++) {
2276 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2277 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2278 			break;
2279 		i40e_msec_delay(10);
2280 	}
2281 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2282 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2283 		    pf_qidx);
2284 		error = ETIMEDOUT;
2285 	}
2286 
2287 	return (error);
2288 }
2289 
2290 /*
2291  * Returns an error if the ring fails to stop within the timeout.
2292  */
2293 int
2294 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2295 {
2296 	struct i40e_hw	*hw = &pf->hw;
2297 	int		error = 0;
2298 	u32		reg;
2299 	u16		pf_qidx;
2300 
2301 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2302 
2303 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2304 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2305 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2306 	/* Verify the disable took */
2307 	for (int j = 0; j < 10; j++) {
2308 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2309 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2310 			break;
2311 		i40e_msec_delay(10);
2312 	}
2313 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2314 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2315 		    pf_qidx);
2316 		error = ETIMEDOUT;
2317 	}
2318 
2319 	return (error);
2320 }
2321 
2322 int
2323 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2324 {
2325 	int error = 0;
2326 
2327 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2328 	/* Called function already prints error message */
2329 	if (error)
2330 		return (error);
2331 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2332 	return (error);
2333 }
2334 
2335 int
2336 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2337 {
2338 	int error = 0, err;
2339 
	/* Try every ring; report the first failure, if any. */
2340 	for (int i = 0; i < vsi->num_tx_queues; i++) {
2341 		err = ixl_disable_tx_ring(pf, qtag, i);
		if (err != 0 && error == 0)
			error = err;
	}
2342 
2343 	for (int i = 0; i < vsi->num_rx_queues; i++) {
2344 		err = ixl_disable_rx_ring(pf, qtag, i);
		if (err != 0 && error == 0)
			error = err;
	}
2345 
2346 	return (error);
2347 }
2348 
2349 static void
2350 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2351 {
2352 	struct i40e_hw *hw = &pf->hw;
2353 	device_t dev = pf->dev;
2354 	struct ixl_vf *vf;
2355 	bool mdd_detected = false;
2356 	bool pf_mdd_detected = false;
2357 	bool vf_mdd_detected = false;
2358 	u16 vf_num, queue;
2359 	u8 pf_num, event;
2360 	u8 pf_mdet_num, vp_mdet_num;
2361 	u32 reg;
2362 
2363 	/* find what triggered the MDD event */
2364 	reg = rd32(hw, I40E_GL_MDET_TX);
2365 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2366 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2367 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2368 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2369 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2370 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2371 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2372 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2373 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
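		/* Clear the latched event so another can be captured. */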
2374 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2375 		mdd_detected = true;
2376 	}
2377 
2378 	if (!mdd_detected)
2379 		return;
2380 
2381 	reg = rd32(hw, I40E_PF_MDET_TX);
2382 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2383 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2384 		pf_mdet_num = hw->pf_id;
2385 		pf_mdd_detected = true;
2386 	}
2387 
2388 	/* Check if MDD was caused by a VF */
2389 	for (int i = 0; i < pf->num_vfs; i++) {
2390 		vf = &(pf->vfs[i]);
2391 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2392 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2393 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2394 			vp_mdet_num = i;
2395 			vf->num_mdd_events++;
2396 			vf_mdd_detected = true;
2397 		}
2398 	}
2399 
2400 	/* Print out an error message */
2401 	if (vf_mdd_detected && pf_mdd_detected)
2402 		device_printf(dev,
2403 		    "Malicious Driver Detection event %d"
2404 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2405 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2406 	else if (vf_mdd_detected && !pf_mdd_detected)
2407 		device_printf(dev,
2408 		    "Malicious Driver Detection event %d"
2409 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2410 		    event, queue, pf_num, vf_num, vp_mdet_num);
2411 	else if (!vf_mdd_detected && pf_mdd_detected)
2412 		device_printf(dev,
2413 		    "Malicious Driver Detection event %d"
2414 		    " on TX queue %d, pf number %d (PF-%d)\n",
2415 		    event, queue, pf_num, pf_mdet_num);
2416 	/* Theoretically shouldn't happen */
2417 	else
2418 		device_printf(dev,
2419 		    "TX Malicious Driver Detection event (unknown)\n");
2420 }
2421 
2422 static void
2423 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2424 {
2425 	struct i40e_hw *hw = &pf->hw;
2426 	device_t dev = pf->dev;
2427 	struct ixl_vf *vf;
2428 	bool mdd_detected = false;
2429 	bool pf_mdd_detected = false;
2430 	bool vf_mdd_detected = false;
2431 	u16 queue;
2432 	u8 pf_num, event;
2433 	u8 pf_mdet_num, vp_mdet_num;
2434 	u32 reg;
2435 
2436 	/*
2437 	 * GL_MDET_RX doesn't contain VF number information, unlike
2438 	 * GL_MDET_TX.
2439 	 */
2440 	reg = rd32(hw, I40E_GL_MDET_RX);
2441 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2442 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2443 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2444 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2445 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2446 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2447 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2448 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2449 		mdd_detected = true;
2450 	}
2451 
2452 	if (!mdd_detected)
2453 		return;
2454 
2455 	reg = rd32(hw, I40E_PF_MDET_RX);
2456 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2457 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2458 		pf_mdet_num = hw->pf_id;
2459 		pf_mdd_detected = true;
2460 	}
2461 
2462 	/* Check if MDD was caused by a VF */
2463 	for (int i = 0; i < pf->num_vfs; i++) {
2464 		vf = &(pf->vfs[i]);
2465 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2466 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2467 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2468 			vp_mdet_num = i;
2469 			vf->num_mdd_events++;
2470 			vf_mdd_detected = true;
2471 		}
2472 	}
2473 
2474 	/* Print out an error message */
2475 	if (vf_mdd_detected && pf_mdd_detected)
2476 		device_printf(dev,
2477 		    "Malicious Driver Detection event %d"
2478 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2479 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2480 	else if (vf_mdd_detected && !pf_mdd_detected)
2481 		device_printf(dev,
2482 		    "Malicious Driver Detection event %d"
2483 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2484 		    event, queue, pf_num, vp_mdet_num);
2485 	else if (!vf_mdd_detected && pf_mdd_detected)
2486 		device_printf(dev,
2487 		    "Malicious Driver Detection event %d"
2488 		    " on RX queue %d, pf number %d (PF-%d)\n",
2489 		    event, queue, pf_num, pf_mdet_num);
2490 	/* Theoretically shouldn't happen */
2491 	else
2492 		device_printf(dev,
2493 		    "RX Malicious Driver Detection event (unknown)\n");
2494 }
2495 
2496 /**
2497  * ixl_handle_mdd_event
2498  *
2499  * Called from the interrupt handler to identify possibly malicious VFs
2500  * (it also detects events caused by the PF itself)
2501  **/
2502 void
2503 ixl_handle_mdd_event(struct ixl_pf *pf)
2504 {
2505 	struct i40e_hw *hw = &pf->hw;
2506 	u32 reg;
2507 
2508 	/*
2509 	 * Handle both TX/RX because it's possible they could
2510 	 * both trigger in the same interrupt.
2511 	 */
2512 	ixl_handle_tx_mdd_event(pf);
2513 	ixl_handle_rx_mdd_event(pf);
2514 
2515 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2516 
2517 	/* re-enable mdd interrupt cause */
2518 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2519 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2520 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2521 	ixl_flush(hw);
2522 }
2523 
2524 void
2525 ixl_enable_intr(struct ixl_vsi *vsi)
2526 {
2527 	struct i40e_hw		*hw = vsi->hw;
2528 	struct ixl_rx_queue	*que = vsi->rx_queues;
2529 
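	/*
	 * With MSI-X each RX queue has its own vector; otherwise the
	 * single legacy/MSI vector (intr0) services everything.
	 */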
2530 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2531 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2532 			ixl_enable_queue(hw, que->rxr.me);
2533 	} else
2534 		ixl_enable_intr0(hw);
2535 }
2536 
2537 void
2538 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2539 {
2540 	struct i40e_hw		*hw = vsi->hw;
2541 	struct ixl_rx_queue	*que = vsi->rx_queues;
2542 
2543 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 		ixl_disable_queue(hw, que->rxr.me);
2545 }
2546 
2547 void
2548 ixl_enable_intr0(struct i40e_hw *hw)
2549 {
2550 	u32		reg;
2551 
2552 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2553 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2554 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2555 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2556 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2557 }
2558 
2559 void
2560 ixl_disable_intr0(struct i40e_hw *hw)
2561 {
2562 	u32		reg;
2563 
2564 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2565 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2566 	ixl_flush(hw);
2567 }
2568 
2569 void
2570 ixl_enable_queue(struct i40e_hw *hw, int id)
2571 {
2572 	u32		reg;
2573 
2574 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2575 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2576 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2577 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2578 }
2579 
2580 void
2581 ixl_disable_queue(struct i40e_hw *hw, int id)
2582 {
2583 	u32		reg;
2584 
2585 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2586 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2587 }
2588 
2589 void
2590 ixl_update_stats_counters(struct ixl_pf *pf)
2591 {
2592 	struct i40e_hw	*hw = &pf->hw;
2593 	struct ixl_vsi	*vsi = &pf->vsi;
2594 	struct ixl_vf	*vf;
2595 	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2596 
2597 	struct i40e_hw_port_stats *nsd = &pf->stats;
2598 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2599 
2600 	/* Update hw stats */
2601 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2602 			   pf->stat_offsets_loaded,
2603 			   &osd->crc_errors, &nsd->crc_errors);
2604 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2605 			   pf->stat_offsets_loaded,
2606 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2607 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2608 			   I40E_GLPRT_GORCL(hw->port),
2609 			   pf->stat_offsets_loaded,
2610 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2611 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2612 			   I40E_GLPRT_GOTCL(hw->port),
2613 			   pf->stat_offsets_loaded,
2614 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2615 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2616 			   pf->stat_offsets_loaded,
2617 			   &osd->eth.rx_discards,
2618 			   &nsd->eth.rx_discards);
2619 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2620 			   I40E_GLPRT_UPRCL(hw->port),
2621 			   pf->stat_offsets_loaded,
2622 			   &osd->eth.rx_unicast,
2623 			   &nsd->eth.rx_unicast);
2624 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2625 			   I40E_GLPRT_UPTCL(hw->port),
2626 			   pf->stat_offsets_loaded,
2627 			   &osd->eth.tx_unicast,
2628 			   &nsd->eth.tx_unicast);
2629 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2630 			   I40E_GLPRT_MPRCL(hw->port),
2631 			   pf->stat_offsets_loaded,
2632 			   &osd->eth.rx_multicast,
2633 			   &nsd->eth.rx_multicast);
2634 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2635 			   I40E_GLPRT_MPTCL(hw->port),
2636 			   pf->stat_offsets_loaded,
2637 			   &osd->eth.tx_multicast,
2638 			   &nsd->eth.tx_multicast);
2639 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2640 			   I40E_GLPRT_BPRCL(hw->port),
2641 			   pf->stat_offsets_loaded,
2642 			   &osd->eth.rx_broadcast,
2643 			   &nsd->eth.rx_broadcast);
2644 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2645 			   I40E_GLPRT_BPTCL(hw->port),
2646 			   pf->stat_offsets_loaded,
2647 			   &osd->eth.tx_broadcast,
2648 			   &nsd->eth.tx_broadcast);
2649 
2650 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2651 			   pf->stat_offsets_loaded,
2652 			   &osd->tx_dropped_link_down,
2653 			   &nsd->tx_dropped_link_down);
2654 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2655 			   pf->stat_offsets_loaded,
2656 			   &osd->mac_local_faults,
2657 			   &nsd->mac_local_faults);
2658 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2659 			   pf->stat_offsets_loaded,
2660 			   &osd->mac_remote_faults,
2661 			   &nsd->mac_remote_faults);
2662 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2663 			   pf->stat_offsets_loaded,
2664 			   &osd->rx_length_errors,
2665 			   &nsd->rx_length_errors);
2666 
2667 	/* Flow control (LFC) stats */
2668 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2669 			   pf->stat_offsets_loaded,
2670 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2671 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2672 			   pf->stat_offsets_loaded,
2673 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2674 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2675 			   pf->stat_offsets_loaded,
2676 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2677 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2678 			   pf->stat_offsets_loaded,
2679 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2680 
2681 	/*
2682 	 * For watchdog management we need to know if we have been paused
2683 	 * during the last interval, so capture that here.
2684 	 */
2685 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2686 		vsi->shared->isc_pause_frames = 1;
2687 
2688 	/* Packet size stats rx */
2689 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2690 			   I40E_GLPRT_PRC64L(hw->port),
2691 			   pf->stat_offsets_loaded,
2692 			   &osd->rx_size_64, &nsd->rx_size_64);
2693 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2694 			   I40E_GLPRT_PRC127L(hw->port),
2695 			   pf->stat_offsets_loaded,
2696 			   &osd->rx_size_127, &nsd->rx_size_127);
2697 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2698 			   I40E_GLPRT_PRC255L(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->rx_size_255, &nsd->rx_size_255);
2701 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2702 			   I40E_GLPRT_PRC511L(hw->port),
2703 			   pf->stat_offsets_loaded,
2704 			   &osd->rx_size_511, &nsd->rx_size_511);
2705 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2706 			   I40E_GLPRT_PRC1023L(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2709 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2710 			   I40E_GLPRT_PRC1522L(hw->port),
2711 			   pf->stat_offsets_loaded,
2712 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2713 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2714 			   I40E_GLPRT_PRC9522L(hw->port),
2715 			   pf->stat_offsets_loaded,
2716 			   &osd->rx_size_big, &nsd->rx_size_big);
2717 
2718 	/* Packet size stats tx */
2719 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2720 			   I40E_GLPRT_PTC64L(hw->port),
2721 			   pf->stat_offsets_loaded,
2722 			   &osd->tx_size_64, &nsd->tx_size_64);
2723 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2724 			   I40E_GLPRT_PTC127L(hw->port),
2725 			   pf->stat_offsets_loaded,
2726 			   &osd->tx_size_127, &nsd->tx_size_127);
2727 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2728 			   I40E_GLPRT_PTC255L(hw->port),
2729 			   pf->stat_offsets_loaded,
2730 			   &osd->tx_size_255, &nsd->tx_size_255);
2731 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2732 			   I40E_GLPRT_PTC511L(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->tx_size_511, &nsd->tx_size_511);
2735 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2736 			   I40E_GLPRT_PTC1023L(hw->port),
2737 			   pf->stat_offsets_loaded,
2738 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2739 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2740 			   I40E_GLPRT_PTC1522L(hw->port),
2741 			   pf->stat_offsets_loaded,
2742 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2743 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2744 			   I40E_GLPRT_PTC9522L(hw->port),
2745 			   pf->stat_offsets_loaded,
2746 			   &osd->tx_size_big, &nsd->tx_size_big);
2747 
2748 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2749 			   pf->stat_offsets_loaded,
2750 			   &osd->rx_undersize, &nsd->rx_undersize);
2751 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2752 			   pf->stat_offsets_loaded,
2753 			   &osd->rx_fragments, &nsd->rx_fragments);
2754 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2755 			   pf->stat_offsets_loaded,
2756 			   &osd->rx_oversize, &nsd->rx_oversize);
2757 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2758 			   pf->stat_offsets_loaded,
2759 			   &osd->rx_jabber, &nsd->rx_jabber);
2760 	pf->stat_offsets_loaded = true;
2761 	/* End hw stats */
2762 
2763 	/* Update vsi stats */
2764 	ixl_update_vsi_stats(vsi);
2765 
2766 	for (int i = 0; i < pf->num_vfs; i++) {
2767 		vf = &pf->vfs[i];
2768 		if (vf->vf_flags & VF_FLAG_ENABLED)
2769 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2770 	}
2771 }
2772 
2773 int
2774 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2775 {
2776 	struct i40e_hw *hw = &pf->hw;
2777 	device_t dev = pf->dev;
2778 	int error = 0;
2779 
2780 	error = i40e_shutdown_lan_hmc(hw);
2781 	if (error)
2782 		device_printf(dev,
2783 		    "Shutdown LAN HMC failed with code %d\n", error);
2784 
2785 	ixl_disable_intr0(hw);
2786 
2787 	error = i40e_shutdown_adminq(hw);
2788 	if (error)
2789 		device_printf(dev,
2790 		    "Shutdown Admin queue failed with code %d\n", error);
2791 
2792 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2793 	return (error);
2794 }
2795 
2796 int
2797 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2798 {
2799 	struct i40e_hw *hw = &pf->hw;
2800 	struct ixl_vsi *vsi = &pf->vsi;
2801 	device_t dev = pf->dev;
2802 	int error = 0;
2803 
2804 	device_printf(dev, "Rebuilding driver state...\n");
2805 
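	/*
	 * Bring the HW back up in order: PF reset, admin queue,
	 * capabilities, LAN HMC, queue allocation, switch config,
	 * then link state.
	 */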
2806 	error = i40e_pf_reset(hw);
2807 	if (error) {
2808 		device_printf(dev, "PF reset failure %s\n",
2809 		    i40e_stat_str(hw, error));
2810 		goto ixl_rebuild_hw_structs_after_reset_err;
2811 	}
2812 
2813 	/* Setup */
2814 	error = i40e_init_adminq(hw);
2815 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2816 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2817 		    error);
2818 		goto ixl_rebuild_hw_structs_after_reset_err;
2819 	}
2820 
2821 	i40e_clear_pxe_mode(hw);
2822 
2823 	error = ixl_get_hw_capabilities(pf);
2824 	if (error) {
2825 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2826 		goto ixl_rebuild_hw_structs_after_reset_err;
2827 	}
2828 
2829 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2830 	    hw->func_caps.num_rx_qp, 0, 0);
2831 	if (error) {
2832 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2833 		goto ixl_rebuild_hw_structs_after_reset_err;
2834 	}
2835 
2836 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2837 	if (error) {
2838 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2839 		goto ixl_rebuild_hw_structs_after_reset_err;
2840 	}
2841 
2842 	/* reserve a contiguous allocation for the PF's VSI */
2843 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2844 	if (error) {
2845 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2846 		    error);
2847 		/* TODO: error handling */
2848 	}
2849 
2850 	error = ixl_switch_config(pf);
2851 	if (error) {
2852 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2853 		     error);
2854 		error = EIO;
2855 		goto ixl_rebuild_hw_structs_after_reset_err;
2856 	}
2857 
2858 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL);
2860 	if (error) {
2861 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2862 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2863 		error = EIO;
2864 		goto ixl_rebuild_hw_structs_after_reset_err;
2865 	}
2866 
2867 	u8 set_fc_err_mask;
2868 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2869 	if (error) {
2870 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2871 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2872 		error = EIO;
2873 		goto ixl_rebuild_hw_structs_after_reset_err;
2874 	}
2875 
2876 	/* Remove default filters reinstalled by FW on reset */
2877 	ixl_del_default_hw_filters(vsi);
2878 
2879 	/* Determine link state */
2880 	if (ixl_attach_get_link_status(pf)) {
2881 		error = EINVAL;
2882 		/* TODO: error handling */
2883 	}
2884 
2885 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2886 	ixl_get_fw_lldp_status(pf);
2887 
2888 	/* Keep admin queue interrupts active while driver is loaded */
2889 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2890 		ixl_configure_intr0_msix(pf);
2891 		ixl_enable_intr0(hw);
2892 	}
2893 
2894 	device_printf(dev, "Rebuilding driver state done.\n");
2895 	return (0);
2896 
2897 ixl_rebuild_hw_structs_after_reset_err:
2898 	device_printf(dev, "Reload the driver to recover\n");
2899 	return (error);
2900 }
2901 
2902 void
2903 ixl_handle_empr_reset(struct ixl_pf *pf)
2904 {
2905 	struct ixl_vsi	*vsi = &pf->vsi;
2906 	struct i40e_hw	*hw = &pf->hw;
2907 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2908 	int count = 0;
2909 	u32 reg;
2910 
2911 	ixl_prepare_for_reset(pf, is_up);
2912 
2913 	/* Typically finishes within 3-4 seconds; poll for up to 10 */
2914 	while (count++ < 100) {
2915 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2916 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2917 		if (reg)
2918 			i40e_msec_delay(100);
2919 		else
2920 			break;
2921 	}
2922 	ixl_dbg(pf, IXL_DBG_INFO,
2923 			"Reset wait count: %d\n", count);
2924 
2925 	ixl_rebuild_hw_structs_after_reset(pf);
2926 
2927 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2928 }
2929 
2930 /**
2931  * Update VSI-specific Ethernet statistics counters.
2932  **/
2933 void
2934 ixl_update_eth_stats(struct ixl_vsi *vsi)
2935 {
2936 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2937 	struct i40e_hw *hw = &pf->hw;
2938 	struct i40e_eth_stats *es;
2939 	struct i40e_eth_stats *oes;
2940 	struct i40e_hw_port_stats *nsd;
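	/* stat_counter_idx selects this VSI's bank of GLV_* statistics registers. */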
2941 	u16 stat_idx = vsi->info.stat_counter_idx;
2942 
2943 	es = &vsi->eth_stats;
2944 	oes = &vsi->eth_stats_offsets;
2945 	nsd = &pf->stats;
2946 
2947 	/* Gather up the stats that the hw collects */
2948 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2949 			   vsi->stat_offsets_loaded,
2950 			   &oes->tx_errors, &es->tx_errors);
2951 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2952 			   vsi->stat_offsets_loaded,
2953 			   &oes->rx_discards, &es->rx_discards);
2954 
2955 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2956 			   I40E_GLV_GORCL(stat_idx),
2957 			   vsi->stat_offsets_loaded,
2958 			   &oes->rx_bytes, &es->rx_bytes);
2959 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2960 			   I40E_GLV_UPRCL(stat_idx),
2961 			   vsi->stat_offsets_loaded,
2962 			   &oes->rx_unicast, &es->rx_unicast);
2963 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2964 			   I40E_GLV_MPRCL(stat_idx),
2965 			   vsi->stat_offsets_loaded,
2966 			   &oes->rx_multicast, &es->rx_multicast);
2967 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2968 			   I40E_GLV_BPRCL(stat_idx),
2969 			   vsi->stat_offsets_loaded,
2970 			   &oes->rx_broadcast, &es->rx_broadcast);
2971 
2972 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2973 			   I40E_GLV_GOTCL(stat_idx),
2974 			   vsi->stat_offsets_loaded,
2975 			   &oes->tx_bytes, &es->tx_bytes);
2976 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2977 			   I40E_GLV_UPTCL(stat_idx),
2978 			   vsi->stat_offsets_loaded,
2979 			   &oes->tx_unicast, &es->tx_unicast);
2980 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2981 			   I40E_GLV_MPTCL(stat_idx),
2982 			   vsi->stat_offsets_loaded,
2983 			   &oes->tx_multicast, &es->tx_multicast);
2984 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2985 			   I40E_GLV_BPTCL(stat_idx),
2986 			   vsi->stat_offsets_loaded,
2987 			   &oes->tx_broadcast, &es->tx_broadcast);
2988 	vsi->stat_offsets_loaded = true;
2989 }
2990 
2991 void
2992 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2993 {
2994 	struct ixl_pf		*pf;
2995 	struct ifnet		*ifp;
2996 	struct i40e_eth_stats	*es;
2997 	u64			tx_discards;
2998 
2999 	struct i40e_hw_port_stats *nsd;
3000 
3001 	pf = vsi->back;
3002 	ifp = vsi->ifp;
3003 	es = &vsi->eth_stats;
3004 	nsd = &pf->stats;
3005 
3006 	ixl_update_eth_stats(vsi);
3007 
3008 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3009 
3010 	/* Update ifnet stats */
3011 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3012 	                   es->rx_multicast +
3013 			   es->rx_broadcast);
3014 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3015 	                   es->tx_multicast +
3016 			   es->tx_broadcast);
3017 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3018 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3019 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3020 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3021 
3022 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3023 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3024 	    nsd->rx_jabber);
3025 	IXL_SET_OERRORS(vsi, es->tx_errors);
3026 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3027 	IXL_SET_OQDROPS(vsi, tx_discards);
3028 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3029 	IXL_SET_COLLISIONS(vsi, 0);
3030 }
3031 
3032 /**
3033  * Reset all of the stats for the given pf
3034  **/
3035 void
3036 ixl_pf_reset_stats(struct ixl_pf *pf)
3037 {
3038 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3039 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3040 	pf->stat_offsets_loaded = false;
3041 }
3042 
3043 /**
3044  * Resets all stats of the given vsi
3045  **/
3046 void
3047 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3048 {
3049 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3050 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3051 	vsi->stat_offsets_loaded = false;
3052 }
3053 
3054 /**
3055  * Read and update a 48 bit stat from the hw
3056  *
3057  * Since the device stats are not reset at PFReset, they likely will not
3058  * be zeroed when the driver starts.  We'll save the first values read
3059  * and use them as offsets to be subtracted from the raw values in order
3060  * to report stats that count from zero.
3061  **/
3062 void
3063 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3064 	bool offset_loaded, u64 *offset, u64 *stat)
3065 {
3066 	u64 new_data;
3067 
3068 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3069 	new_data = rd64(hw, loreg);
3070 #else
3071 	/*
3072 	 * Use two rd32's instead of one rd64: 32-bit platforms can't do
3073 	 * a single 64-bit bus read, and FreeBSD versions before 10 don't
	 * support 64-bit bus reads/writes at all.
3074 	 */
3075 	new_data = rd32(hw, loreg);
3076 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3077 #endif
3078 
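	/*
	 * Handle wraparound of the 48-bit counter: e.g. with
	 * offset = 0xFFFFFFFFFFF0 and new_data = 0x10,
	 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.
	 */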
3079 	if (!offset_loaded)
3080 		*offset = new_data;
3081 	if (new_data >= *offset)
3082 		*stat = new_data - *offset;
3083 	else
3084 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3085 	*stat &= 0xFFFFFFFFFFFFULL;
3086 }
3087 
3088 /**
3089  * Read and update a 32 bit stat from the hw
3090  **/
3091 void
3092 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3093 	bool offset_loaded, u64 *offset, u64 *stat)
3094 {
3095 	u32 new_data;
3096 
3097 	new_data = rd32(hw, reg);
3098 	if (!offset_loaded)
3099 		*offset = new_data;
3100 	if (new_data >= *offset)
3101 		*stat = (u32)(new_data - *offset);
3102 	else
3103 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3104 }
3105 
3106 void
3107 ixl_add_device_sysctls(struct ixl_pf *pf)
3108 {
3109 	device_t dev = pf->dev;
3110 	struct i40e_hw *hw = &pf->hw;
3111 
3112 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3113 	struct sysctl_oid_list *ctx_list =
3114 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3115 
3116 	struct sysctl_oid *debug_node;
3117 	struct sysctl_oid_list *debug_list;
3118 
3119 	struct sysctl_oid *fec_node;
3120 	struct sysctl_oid_list *fec_list;
3121 
3122 	/* Set up sysctls */
3123 	SYSCTL_ADD_PROC(ctx, ctx_list,
3124 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3125 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3126 
3127 	SYSCTL_ADD_PROC(ctx, ctx_list,
3128 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3129 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3130 
3131 	SYSCTL_ADD_PROC(ctx, ctx_list,
3132 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3133 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3134 
3135 	SYSCTL_ADD_PROC(ctx, ctx_list,
3136 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3137 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3138 
3139 	SYSCTL_ADD_PROC(ctx, ctx_list,
3140 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3141 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3142 
3143 	SYSCTL_ADD_PROC(ctx, ctx_list,
3144 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3145 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3146 	    "Queues not allocated to a PF or VF");
3147 
3148 	SYSCTL_ADD_PROC(ctx, ctx_list,
3149 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3150 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3151 	    "Immediately set TX ITR value for all queues");
3152 
3153 	SYSCTL_ADD_PROC(ctx, ctx_list,
3154 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3155 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3156 	    "Immediately set RX ITR value for all queues");
3157 
3158 	SYSCTL_ADD_INT(ctx, ctx_list,
3159 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3160 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3161 
3162 	SYSCTL_ADD_INT(ctx, ctx_list,
3163 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3164 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3165 
3166 	/* Add FEC sysctls for 25G adapters */
3167 	if (i40e_is_25G_device(hw->device_id)) {
3168 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3169 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3170 		fec_list = SYSCTL_CHILDREN(fec_node);
3171 
3172 		SYSCTL_ADD_PROC(ctx, fec_list,
3173 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3174 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3175 
3176 		SYSCTL_ADD_PROC(ctx, fec_list,
3177 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3178 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3179 
3180 		SYSCTL_ADD_PROC(ctx, fec_list,
3181 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3182 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3183 
3184 		SYSCTL_ADD_PROC(ctx, fec_list,
3185 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3186 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3187 
3188 		SYSCTL_ADD_PROC(ctx, fec_list,
3189 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3190 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3191 	}
3192 
3193 	SYSCTL_ADD_PROC(ctx, ctx_list,
3194 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3195 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3196 
	/*
	 * Add sysctls meant to print debug information, but don't list
	 * them in "sysctl -a" output.
	 */
3199 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3200 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3201 	debug_list = SYSCTL_CHILDREN(debug_node);
3202 
3203 	SYSCTL_ADD_UINT(ctx, debug_list,
3204 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3205 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3206 
3207 	SYSCTL_ADD_UINT(ctx, debug_list,
3208 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3209 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3210 
3211 	SYSCTL_ADD_PROC(ctx, debug_list,
3212 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3213 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3214 
3215 	SYSCTL_ADD_PROC(ctx, debug_list,
3216 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3217 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3218 
3219 	SYSCTL_ADD_PROC(ctx, debug_list,
3220 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3221 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3222 
3223 	SYSCTL_ADD_PROC(ctx, debug_list,
3224 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3225 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3226 
3227 	SYSCTL_ADD_PROC(ctx, debug_list,
3228 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3229 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3230 
3231 	SYSCTL_ADD_PROC(ctx, debug_list,
3232 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3233 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3234 
3235 	SYSCTL_ADD_PROC(ctx, debug_list,
3236 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3237 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3238 
3239 	SYSCTL_ADD_PROC(ctx, debug_list,
3240 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3241 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3242 
3243 	SYSCTL_ADD_PROC(ctx, debug_list,
3244 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3245 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3246 
3247 	SYSCTL_ADD_PROC(ctx, debug_list,
3248 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3249 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3250 
3251 	SYSCTL_ADD_PROC(ctx, debug_list,
3252 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3253 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3254 
3255 	SYSCTL_ADD_PROC(ctx, debug_list,
3256 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3258 
3259 	SYSCTL_ADD_PROC(ctx, debug_list,
3260 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3261 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3262 
3263 	SYSCTL_ADD_PROC(ctx, debug_list,
3264 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3265 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3266 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3267 
3268 	SYSCTL_ADD_PROC(ctx, debug_list,
3269 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3270 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3271 
3272 	if (pf->has_i2c) {
3273 		SYSCTL_ADD_PROC(ctx, debug_list,
3274 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3275 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3276 
3277 		SYSCTL_ADD_PROC(ctx, debug_list,
3278 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3279 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3280 
3281 		SYSCTL_ADD_PROC(ctx, debug_list,
3282 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3283 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3284 	}
3285 }
3286 
3287 /*
3288  * Primarily for finding out how many queues can be assigned to VFs
3289  * at runtime.
3290  */
3291 static int
3292 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3293 {
3294 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3295 	int queues;
3296 
3297 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3298 
3299 	return sysctl_handle_int(oidp, NULL, queues, req);
3300 }
3301 
3302 /*
3303 ** Set flow control using sysctl:
3304 ** 	0 - off
3305 **	1 - rx pause
3306 **	2 - tx pause
3307 **	3 - full
3308 */
3309 int
3310 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3311 {
3312 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3313 	struct i40e_hw *hw = &pf->hw;
3314 	device_t dev = pf->dev;
3315 	int requested_fc, error = 0;
3316 	enum i40e_status_code aq_error = 0;
3317 	u8 fc_aq_err = 0;
3318 
3319 	/* Get request */
3320 	requested_fc = pf->fc;
3321 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3322 	if ((error) || (req->newptr == NULL))
3323 		return (error);
3324 	if (requested_fc < 0 || requested_fc > 3) {
3325 		device_printf(dev,
3326 		    "Invalid fc mode; valid modes are 0 through 3\n");
3327 		return (EINVAL);
3328 	}
3329 
3330 	/* Set fc ability for port */
3331 	hw->fc.requested_mode = requested_fc;
3332 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3333 	if (aq_error) {
3334 		device_printf(dev,
3335 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3336 		    __func__, aq_error, fc_aq_err);
3337 		return (EIO);
3338 	}
3339 	pf->fc = requested_fc;
3340 
3341 	return (0);
3342 }
3343 
3344 char *
3345 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3346 {
3347 	int index;
3348 
3349 	char *speeds[] = {
3350 		"Unknown",
3351 		"100 Mbps",
3352 		"1 Gbps",
3353 		"10 Gbps",
3354 		"40 Gbps",
3355 		"20 Gbps",
3356 		"25 Gbps",
3357 	};
3358 
3359 	switch (link_speed) {
3360 	case I40E_LINK_SPEED_100MB:
3361 		index = 1;
3362 		break;
3363 	case I40E_LINK_SPEED_1GB:
3364 		index = 2;
3365 		break;
3366 	case I40E_LINK_SPEED_10GB:
3367 		index = 3;
3368 		break;
3369 	case I40E_LINK_SPEED_40GB:
3370 		index = 4;
3371 		break;
3372 	case I40E_LINK_SPEED_20GB:
3373 		index = 5;
3374 		break;
3375 	case I40E_LINK_SPEED_25GB:
3376 		index = 6;
3377 		break;
3378 	case I40E_LINK_SPEED_UNKNOWN:
3379 	default:
3380 		index = 0;
3381 		break;
3382 	}
3383 
3384 	return speeds[index];
3385 }
3386 
3387 int
3388 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3389 {
3390 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3391 	struct i40e_hw *hw = &pf->hw;
3392 	int error = 0;
3393 
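	/* Refresh the cached link info so the reported speed is current. */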
3394 	ixl_update_link_status(pf);
3395 
3396 	error = sysctl_handle_string(oidp,
3397 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3398 	    8, req);
3399 	return (error);
3400 }
3401 
3402 /*
3403  * Converts an 8-bit speed bitmap between the sysctl flag format
3404  * and the Admin Queue flag format.
3405  */
3406 static u8
3407 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3408 {
3409 	static u16 speedmap[6] = {
3410 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3411 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3412 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3413 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3414 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3415 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3416 	};
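	/*
	 * Each entry packs the AQ flag in the low byte and the matching
	 * sysctl flag in the high byte, so one table converts both ways.
	 */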
3417 	u8 retval = 0;
3418 
3419 	for (int i = 0; i < 6; i++) {
3420 		if (to_aq)
3421 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3422 		else
3423 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3424 	}
3425 
3426 	return (retval);
3427 }
3428 
3429 int
3430 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3431 {
3432 	struct i40e_hw *hw = &pf->hw;
3433 	device_t dev = pf->dev;
3434 	struct i40e_aq_get_phy_abilities_resp abilities;
3435 	struct i40e_aq_set_phy_config config;
3436 	enum i40e_status_code aq_error = 0;
3437 
3438 	/* Get current capability information */
3439 	aq_error = i40e_aq_get_phy_capabilities(hw,
3440 	    FALSE, FALSE, &abilities, NULL);
3441 	if (aq_error) {
3442 		device_printf(dev,
3443 		    "%s: Error getting phy capabilities %d,"
3444 		    " aq error: %d\n", __func__, aq_error,
3445 		    hw->aq.asq_last_status);
3446 		return (EIO);
3447 	}
3448 
3449 	/* Prepare new config */
3450 	bzero(&config, sizeof(config));
3451 	if (from_aq)
3452 		config.link_speed = speeds;
3453 	else
3454 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3455 	config.phy_type = abilities.phy_type;
3456 	config.phy_type_ext = abilities.phy_type_ext;
3457 	config.abilities = abilities.abilities
3458 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3459 	config.eee_capability = abilities.eee_capability;
3460 	config.eeer = abilities.eeer_val;
3461 	config.low_power_ctrl = abilities.d3_lpan;
3462 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3463 
3464 	/* Do aq command & restart link */
3465 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3466 	if (aq_error) {
3467 		device_printf(dev,
3468 		    "%s: Error setting new phy config %d,"
3469 		    " aq error: %d\n", __func__, aq_error,
3470 		    hw->aq.asq_last_status);
3471 		return (EIO);
3472 	}
3473 
3474 	return (0);
3475 }
3476 
3477 /*
3478 ** Supported link speeds:
3479 **	Flags:
3480 **	 0x1 - 100 Mb
3481 **	 0x2 - 1G
3482 **	 0x4 - 10G
3483 **	 0x8 - 20G
3484 **	0x10 - 25G
3485 **	0x20 - 40G
3486 */
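/*
** Example: a reported value of 0x26 (0x2|0x4|0x20) means 1G, 10G, and
** 40G are supported.
*/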
3487 static int
3488 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3489 {
3490 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3491 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3492 
3493 	return sysctl_handle_int(oidp, NULL, supported, req);
3494 }
3495 
3496 /*
3497 ** Control link advertise speed:
3498 **	Flags:
3499 **	 0x1 - advertise 100 Mb
3500 **	 0x2 - advertise 1G
3501 **	 0x4 - advertise 10G
3502 **	 0x8 - advertise 20G
3503 **	0x10 - advertise 25G
3504 **	0x20 - advertise 40G
3505 **
3506 **	Set to 0 to disable link
3507 */
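/*
** Example usage (unit number illustrative):
**	sysctl dev.ixl.0.advertise_speed=0x6	# advertise only 1G and 10G
*/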
3508 int
3509 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3510 {
3511 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3512 	device_t dev = pf->dev;
3513 	u8 converted_speeds;
3514 	int requested_ls = 0;
3515 	int error = 0;
3516 
3517 	/* Read in new mode */
3518 	requested_ls = pf->advertised_speed;
3519 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3520 	if ((error) || (req->newptr == NULL))
3521 		return (error);
3522 
3523 	/* Error out if bits outside of possible flag range are set */
3524 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3525 		device_printf(dev, "Input advertised speed out of range; "
3526 		    "valid flags are: 0x%02x\n",
3527 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3528 		return (EINVAL);
3529 	}
3530 
3531 	/* Check if adapter supports input value */
3532 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3533 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3534 		device_printf(dev, "Invalid advertised speed; "
3535 		    "valid flags are: 0x%02x\n",
3536 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3537 		return (EINVAL);
3538 	}
3539 
3540 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3541 	if (error)
3542 		return (error);
3543 
3544 	pf->advertised_speed = requested_ls;
3545 	ixl_update_link_status(pf);
3546 	return (0);
3547 }
3548 
3549 /*
3550 ** Get the width and transaction speed of
3551 ** the bus this adapter is plugged into.
3552 */
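/*
** Prints a line of the form (example output):
**	ixl0: PCI Express Bus: Speed 8.0GT/s Width x8
*/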
3553 void
3554 ixl_get_bus_info(struct ixl_pf *pf)
3555 {
3556 	struct i40e_hw *hw = &pf->hw;
3557 	device_t dev = pf->dev;
3558 	u16 link;
3559 	u32 offset, num_ports;
3560 	u64 max_speed;
3561 
3562 	/* Some devices don't use PCIE */
3563 	if (hw->mac.type == I40E_MAC_X722)
3564 		return;
3565 
3566 	/* Read PCI Express Capabilities Link Status Register */
3567 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3568 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3569 
3570 	/* Fill out hw struct with PCIE info */
3571 	i40e_set_pci_config_data(hw, link);
3572 
3573 	/* Use info to print out bandwidth messages */
3574 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3575 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3576 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3577 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3578 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3579 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3580 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3581 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3582 	    ("Unknown"));
3583 
3584 	/*
3585 	 * If adapter is in slot with maximum supported speed,
3586 	 * no warning message needs to be printed out.
3587 	 */
3588 	if (hw->bus.speed >= i40e_bus_speed_8000
3589 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3590 		return;
3591 
3592 	num_ports = bitcount32(hw->func_caps.valid_functions);
3593 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3594 
3595 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3596 		device_printf(dev, "PCI-Express bandwidth available"
3597 		    " for this device may be insufficient for"
3598 		    " optimal performance.\n");
3599 		device_printf(dev, "Please move the device to a different"
3600 		    " PCI-e link with more lanes and/or higher"
3601 		    " transfer rate.\n");
3602 	}
3603 }
3604 
3605 static int
3606 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3607 {
3608 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3609 	struct i40e_hw	*hw = &pf->hw;
3610 	struct sbuf	*sbuf;
3611 
3612 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3613 	ixl_nvm_version_str(hw, sbuf);
3614 	sbuf_finish(sbuf);
3615 	sbuf_delete(sbuf);
3616 
3617 	return (0);
3618 }
3619 
3620 void
3621 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3622 {
3623 	if ((nvma->command == I40E_NVM_READ) &&
3624 	    ((nvma->config & 0xFF) == 0xF) &&
3625 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3626 	    (nvma->offset == 0) &&
3627 	    (nvma->data_size == 1)) {
3628 		/* Driver-status poll from the NVM update tool; too chatty to log */
3629 	}
3630 	else if (nvma->command == I40E_NVM_READ) {
3631 		/* Plain NVM reads are frequent; keep quiet about them too */
3632 	}
3633 	else {
3634 		switch (nvma->command) {
3635 		case 0xB:
3636 			device_printf(dev, "- command: I40E_NVM_READ\n");
3637 			break;
3638 		case 0xC:
3639 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3640 			break;
3641 		default:
3642 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3643 			break;
3644 		}
3645 
3646 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3647 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3648 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3649 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3650 	}
3651 }
3652 
3653 int
3654 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3655 {
3656 	struct i40e_hw *hw = &pf->hw;
3657 	struct i40e_nvm_access *nvma;
3658 	device_t dev = pf->dev;
3659 	enum i40e_status_code status = 0;
3660 	size_t nvma_size, ifd_len, exp_len;
3661 	int err, perrno;
3662 
3663 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3664 
3665 	/* Sanity checks */
3666 	nvma_size = sizeof(struct i40e_nvm_access);
3667 	ifd_len = ifd->ifd_len;
3668 
3669 	if (ifd_len < nvma_size ||
3670 	    ifd->ifd_data == NULL) {
3671 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3672 		    __func__);
3673 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3674 		    __func__, ifd_len, nvma_size);
3675 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3676 		    ifd->ifd_data);
3677 		return (EINVAL);
3678 	}
3679 
3680 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3681 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3682 	if (err) {
3683 		device_printf(dev, "%s: Cannot get request from user space\n",
3684 		    __func__);
3685 		free(nvma, M_DEVBUF);
3686 		return (err);
3687 	}
3688 
3689 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3690 		ixl_print_nvm_cmd(dev, nvma);
3691 
3692 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3693 		int count = 0;	/* wait up to 10s (100 x 100ms) for the reset to finish */
3694 		while (count++ < 100) {
3695 			i40e_msec_delay(100);
3696 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3697 				break;
3698 		}
3699 	}
3700 
3701 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3702 		free(nvma, M_DEVBUF);
3703 		return (-EBUSY);
3704 	}
3705 
3706 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3707 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3708 		    __func__);
3709 		free(nvma, M_DEVBUF);
3710 		return (EINVAL);
3711 	}
3712 
3713 	/*
3714 	 * Older versions of the NVM update tool don't set ifd_len to the size
3715 	 * of the entire buffer passed to the ioctl. Check the data_size field
3716 	 * in the contained i40e_nvm_access struct and ensure everything is
3717 	 * copied in from userspace.
3718 	 */
3719 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3720 
3721 	if (ifd_len < exp_len) {
3722 		ifd_len = exp_len;
3723 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3724 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3725 		if (err) {
3726 			device_printf(dev, "%s: Cannot get request from user space\n",
3727 					__func__);
3728 			free(nvma, M_DEVBUF);
3729 			return (err);
3730 		}
3731 	}
3732 
3733 	// TODO: Might need a different lock here
3734 	// IXL_PF_LOCK(pf);
3735 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3736 	// IXL_PF_UNLOCK(pf);
3737 
3738 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3739 	free(nvma, M_DEVBUF);
3740 	if (err) {
3741 		device_printf(dev, "%s: Cannot return data to user space\n",
3742 				__func__);
3743 		return (err);
3744 	}
3745 
3746 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3747 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3748 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3749 		    i40e_stat_str(hw, status), perrno);
3750 
3751 	/*
3752 	 * -EPERM has the same value as ERESTART, which the kernel takes as a
3753 	 * request to restart this ioctl, so return -EACCES in place of -EPERM.
3754 	 */
3755 	if (perrno == -EPERM)
3756 		return (-EACCES);
3757 	else
3758 		return (perrno);
3759 }
3760 
3761 int
3762 ixl_find_i2c_interface(struct ixl_pf *pf)
3763 {
3764 	struct i40e_hw *hw = &pf->hw;
3765 	bool i2c_en, port_matched;
3766 	u32 reg;
3767 
3768 	for (int i = 0; i < 4; i++) {
3769 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3770 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3771 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3772 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3773 		    & BIT(hw->port);
3774 		if (i2c_en && port_matched)
3775 			return (i);
3776 	}
3777 
3778 	return (-1);
3779 }
3780 
3781 static char *
3782 ixl_phy_type_string(u32 bit_pos, bool ext)
3783 {
3784 	static char * phy_types_str[32] = {
3785 		"SGMII",
3786 		"1000BASE-KX",
3787 		"10GBASE-KX4",
3788 		"10GBASE-KR",
3789 		"40GBASE-KR4",
3790 		"XAUI",
3791 		"XFI",
3792 		"SFI",
3793 		"XLAUI",
3794 		"XLPPI",
3795 		"40GBASE-CR4",
3796 		"10GBASE-CR1",
3797 		"SFP+ Active DA",
3798 		"QSFP+ Active DA",
3799 		"Reserved (14)",
3800 		"Reserved (15)",
3801 		"Reserved (16)",
3802 		"100BASE-TX",
3803 		"1000BASE-T",
3804 		"10GBASE-T",
3805 		"10GBASE-SR",
3806 		"10GBASE-LR",
3807 		"10GBASE-SFP+Cu",
3808 		"10GBASE-CR1",
3809 		"40GBASE-CR4",
3810 		"40GBASE-SR4",
3811 		"40GBASE-LR4",
3812 		"1000BASE-SX",
3813 		"1000BASE-LX",
3814 		"1000BASE-T Optical",
3815 		"20GBASE-KR2",
3816 		"Reserved (31)"
3817 	};
3818 	static char * ext_phy_types_str[8] = {
3819 		"25GBASE-KR",
3820 		"25GBASE-CR",
3821 		"25GBASE-SR",
3822 		"25GBASE-LR",
3823 		"25GBASE-AOC",
3824 		"25GBASE-ACC",
3825 		"Reserved (6)",
3826 		"Reserved (7)"
3827 	};
3828 
3829 	if (ext && bit_pos > 7) return "Invalid_Ext";
3830 	if (bit_pos > 31) return "Invalid";
3831 
3832 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3833 }
3834 
3835 /* TODO: ERJ: I don't think this is necessary anymore. */
3836 int
3837 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3838 {
3839 	device_t dev = pf->dev;
3840 	struct i40e_hw *hw = &pf->hw;
3841 	struct i40e_aq_desc desc;
3842 	enum i40e_status_code status;
3843 
3844 	struct i40e_aqc_get_link_status *aq_link_status =
3845 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3846 
3847 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3848 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3849 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3850 	if (status) {
3851 		device_printf(dev,
3852 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3853 		    __func__, i40e_stat_str(hw, status),
3854 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3855 		return (EIO);
3856 	}
3857 
3858 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3859 	return (0);
3860 }
3861 
3862 static char *
3863 ixl_phy_type_string_ls(u8 val)
3864 {
3865 	if (val >= 0x1F)
3866 		return ixl_phy_type_string(val - 0x1F, true);
3867 	else
3868 		return ixl_phy_type_string(val, false);
3869 }
3870 
3871 static int
3872 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3873 {
3874 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3875 	device_t dev = pf->dev;
3876 	struct sbuf *buf;
3877 	int error = 0;
3878 
3879 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3880 	if (!buf) {
3881 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3882 		return (ENOMEM);
3883 	}
3884 
3885 	struct i40e_aqc_get_link_status link_status;
3886 	error = ixl_aq_get_link_status(pf, &link_status);
3887 	if (error) {
3888 		sbuf_delete(buf);
3889 		return (error);
3890 	}
3891 
3892 	sbuf_printf(buf, "\n"
3893 	    "PHY Type : 0x%02x<%s>\n"
3894 	    "Speed    : 0x%02x\n"
3895 	    "Link info: 0x%02x\n"
3896 	    "AN info  : 0x%02x\n"
3897 	    "Ext info : 0x%02x\n"
3898 	    "Loopback : 0x%02x\n"
3899 	    "Max Frame: %d\n"
3900 	    "Config   : 0x%02x\n"
3901 	    "Power    : 0x%02x",
3902 	    link_status.phy_type,
3903 	    ixl_phy_type_string_ls(link_status.phy_type),
3904 	    link_status.link_speed,
3905 	    link_status.link_info,
3906 	    link_status.an_info,
3907 	    link_status.ext_info,
3908 	    link_status.loopback,
3909 	    link_status.max_frame_size,
3910 	    link_status.config,
3911 	    link_status.power_desc);
3912 
3913 	error = sbuf_finish(buf);
3914 	if (error)
3915 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3916 
3917 	sbuf_delete(buf);
3918 	return (error);
3919 }
3920 
3921 static int
3922 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3923 {
3924 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3925 	struct i40e_hw *hw = &pf->hw;
3926 	device_t dev = pf->dev;
3927 	enum i40e_status_code status;
3928 	struct i40e_aq_get_phy_abilities_resp abilities;
3929 	struct sbuf *buf;
3930 	int error = 0;
3931 
3932 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3933 	if (!buf) {
3934 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3935 		return (ENOMEM);
3936 	}
3937 
3938 	status = i40e_aq_get_phy_capabilities(hw,
3939 	    FALSE, FALSE, &abilities, NULL);
3940 	if (status) {
3941 		device_printf(dev,
3942 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3943 		    __func__, i40e_stat_str(hw, status),
3944 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3945 		sbuf_delete(buf);
3946 		return (EIO);
3947 	}
3948 
3949 	sbuf_printf(buf, "\n"
3950 	    "PHY Type : %08x",
3951 	    abilities.phy_type);
3952 
3953 	if (abilities.phy_type != 0) {
3954 		sbuf_printf(buf, "<");
3955 		for (int i = 0; i < 32; i++)
3956 			if ((1 << i) & abilities.phy_type)
3957 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3958 		sbuf_printf(buf, ">\n");
3959 	}
3960 
3961 	sbuf_printf(buf, "PHY Ext  : %02x",
3962 	    abilities.phy_type_ext);
3963 
3964 	if (abilities.phy_type_ext != 0) {
3965 		sbuf_printf(buf, "<");
3966 		for (int i = 0; i < 4; i++)
3967 			if ((1 << i) & abilities.phy_type_ext)
3968 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3969 		sbuf_printf(buf, ">");
3970 	}
3971 	sbuf_printf(buf, "\n");
3972 
3973 	sbuf_printf(buf,
3974 	    "Speed    : %02x\n"
3975 	    "Abilities: %02x\n"
3976 	    "EEE cap  : %04x\n"
3977 	    "EEER reg : %08x\n"
3978 	    "D3 Lpan  : %02x\n"
3979 	    "ID       : %02x %02x %02x %02x\n"
3980 	    "ModType  : %02x %02x %02x\n"
3981 	    "ModType E: %01x\n"
3982 	    "FEC Cfg  : %02x\n"
3983 	    "Ext CC   : %02x",
3984 	    abilities.link_speed,
3985 	    abilities.abilities, abilities.eee_capability,
3986 	    abilities.eeer_val, abilities.d3_lpan,
3987 	    abilities.phy_id[0], abilities.phy_id[1],
3988 	    abilities.phy_id[2], abilities.phy_id[3],
3989 	    abilities.module_type[0], abilities.module_type[1],
3990 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3991 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3992 	    abilities.ext_comp_code);
3993 
3994 	error = sbuf_finish(buf);
3995 	if (error)
3996 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3997 
3998 	sbuf_delete(buf);
3999 	return (error);
4000 }
4001 
4002 static int
4003 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4004 {
4005 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4006 	struct ixl_vsi *vsi = &pf->vsi;
4007 	struct ixl_mac_filter *f;
4008 	device_t dev = pf->dev;
4009 	int error = 0, ftl_len = 0, ftl_counter = 0;
4010 
4011 	struct sbuf *buf;
4012 
4013 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4014 	if (!buf) {
4015 		device_printf(dev, "Could not allocate sbuf for output.\n");
4016 		return (ENOMEM);
4017 	}
4018 
4019 	sbuf_printf(buf, "\n");
4020 
4021 	/* Print MAC filters */
4022 	sbuf_printf(buf, "PF Filters:\n");
4023 	SLIST_FOREACH(f, &vsi->ftl, next)
4024 		ftl_len++;
4025 
4026 	if (ftl_len < 1)
4027 		sbuf_printf(buf, "(none)\n");
4028 	else {
4029 		SLIST_FOREACH(f, &vsi->ftl, next) {
4030 			sbuf_printf(buf,
4031 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4032 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4033 			/* don't print '\n' for last entry */
4034 			if (++ftl_counter != ftl_len)
4035 				sbuf_printf(buf, "\n");
4036 		}
4037 	}
4038 
4039 #ifdef PCI_IOV
4040 	/* TODO: Give each VF its own filter list sysctl */
4041 	struct ixl_vf *vf;
4042 	if (pf->num_vfs > 0) {
4043 		sbuf_printf(buf, "\n\n");
4044 		for (int i = 0; i < pf->num_vfs; i++) {
4045 			vf = &pf->vfs[i];
4046 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4047 				continue;
4048 
4049 			vsi = &vf->vsi;
4050 			ftl_len = ftl_counter = 0;
4051 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4052 			SLIST_FOREACH(f, &vsi->ftl, next)
4053 				ftl_len++;
4054 
4055 			if (ftl_len < 1)
4056 				sbuf_printf(buf, "(none)\n");
4057 			else {
4058 				SLIST_FOREACH(f, &vsi->ftl, next) {
4059 					sbuf_printf(buf,
4060 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4061 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4062 				}
4063 			}
4064 		}
4065 	}
4066 #endif
4067 
4068 	error = sbuf_finish(buf);
4069 	if (error)
4070 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4071 	sbuf_delete(buf);
4072 
4073 	return (error);
4074 }
4075 
4076 #define IXL_SW_RES_SIZE 0x14
4077 int
4078 ixl_res_alloc_cmp(const void *a, const void *b)
4079 {
4080 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4081 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4082 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4083 
4084 	return ((int)one->resource_type - (int)two->resource_type);
4085 }
4086 
4087 /*
4088  * Longest string length: 25
4089  */
4090 char *
4091 ixl_switch_res_type_string(u8 type)
4092 {
4093 	// TODO: This should be changed to static const
4094 	static char * ixl_switch_res_type_strings[0x14] = {
4095 		"VEB",
4096 		"VSI",
4097 		"Perfect Match MAC address",
4098 		"S-tag",
4099 		"(Reserved)",
4100 		"Multicast hash entry",
4101 		"Unicast hash entry",
4102 		"VLAN",
4103 		"VSI List entry",
4104 		"(Reserved)",
4105 		"VLAN Statistic Pool",
4106 		"Mirror Rule",
4107 		"Queue Set",
4108 		"Inner VLAN Forward filter",
4109 		"(Reserved)",
4110 		"Inner MAC",
4111 		"IP",
4112 		"GRE/VN1 Key",
4113 		"VN2 Key",
4114 		"Tunneling Port"
4115 	};
4116 
4117 	if (type < 0x14)
4118 		return ixl_switch_res_type_strings[type];
4119 	else
4120 		return "(Reserved)";
4121 }
4122 
4123 static int
4124 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4125 {
4126 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4127 	struct i40e_hw *hw = &pf->hw;
4128 	device_t dev = pf->dev;
4129 	struct sbuf *buf;
4130 	enum i40e_status_code status;
4131 	int error = 0;
4132 
4133 	u8 num_entries;
4134 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4135 
4136 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4137 	if (!buf) {
4138 		device_printf(dev, "Could not allocate sbuf for output.\n");
4139 		return (ENOMEM);
4140 	}
4141 
4142 	bzero(resp, sizeof(resp));
4143 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4144 				resp,
4145 				IXL_SW_RES_SIZE,
4146 				NULL);
4147 	if (status) {
4148 		device_printf(dev,
4149 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4150 		    __func__, i40e_stat_str(hw, status),
4151 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4152 		sbuf_delete(buf);
4153 		return (EIO);
4154 	}
4155 
4156 	/* Sort entries by type for display */
4157 	qsort(resp, num_entries,
4158 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4159 	    &ixl_res_alloc_cmp);
4160 
4161 	sbuf_cat(buf, "\n");
4162 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4163 	sbuf_printf(buf,
4164 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4165 	    "                          | (this)     | (all) | (this) | (all)       \n");
4166 	for (int i = 0; i < num_entries; i++) {
4167 		sbuf_printf(buf,
4168 		    "%25s | %10d   %5d   %6d   %12d",
4169 		    ixl_switch_res_type_string(resp[i].resource_type),
4170 		    resp[i].guaranteed,
4171 		    resp[i].total,
4172 		    resp[i].used,
4173 		    resp[i].total_unalloced);
4174 		if (i < num_entries - 1)
4175 			sbuf_cat(buf, "\n");
4176 	}
4177 
4178 	error = sbuf_finish(buf);
4179 	if (error)
4180 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4181 
4182 	sbuf_delete(buf);
4183 	return (error);
4184 }
4185 
4186 /*
4187 ** Caller must init and delete sbuf; this function will clear and
4188 ** finish it for caller.
4189 */
4190 char *
4191 ixl_switch_element_string(struct sbuf *s,
4192     struct i40e_aqc_switch_config_element_resp *element)
4193 {
4194 	sbuf_clear(s);
4195 
4196 	switch (element->element_type) {
4197 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4198 		sbuf_printf(s, "MAC %3d", element->element_info);
4199 		break;
4200 	case I40E_AQ_SW_ELEM_TYPE_PF:
4201 		sbuf_printf(s, "PF  %3d", element->element_info);
4202 		break;
4203 	case I40E_AQ_SW_ELEM_TYPE_VF:
4204 		sbuf_printf(s, "VF  %3d", element->element_info);
4205 		break;
4206 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4207 		sbuf_cat(s, "EMP");
4208 		break;
4209 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4210 		sbuf_cat(s, "BMC");
4211 		break;
4212 	case I40E_AQ_SW_ELEM_TYPE_PV:
4213 		sbuf_cat(s, "PV");
4214 		break;
4215 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4216 		sbuf_cat(s, "VEB");
4217 		break;
4218 	case I40E_AQ_SW_ELEM_TYPE_PA:
4219 		sbuf_cat(s, "PA");
4220 		break;
4221 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4222 		sbuf_printf(s, "VSI %3d", element->element_info);
4223 		break;
4224 	default:
4225 		sbuf_cat(s, "?");
4226 		break;
4227 	}
4228 
4229 	sbuf_finish(s);
4230 	return sbuf_data(s);
4231 }
4232 
4233 static int
4234 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4235 {
4236 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4237 	struct i40e_hw *hw = &pf->hw;
4238 	device_t dev = pf->dev;
4239 	struct sbuf *buf;
4240 	struct sbuf *nmbuf;
4241 	enum i40e_status_code status;
4242 	int error = 0;
4243 	u16 next = 0;
4244 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4245 
4246 	struct i40e_aqc_get_switch_config_resp *sw_config;
4247 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4248 
4249 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4250 	if (!buf) {
4251 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4252 		return (ENOMEM);
4253 	}
4254 
4255 	status = i40e_aq_get_switch_config(hw, sw_config,
4256 	    sizeof(aq_buf), &next, NULL);
4257 	if (status) {
4258 		device_printf(dev,
4259 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4260 		    __func__, i40e_stat_str(hw, status),
4261 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4262 		sbuf_delete(buf);
4263 		return (EIO);
4264 	}
4265 	if (next)
4266 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4267 		    __func__, next);
4268 
4269 	nmbuf = sbuf_new_auto();
4270 	if (!nmbuf) {
4271 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4272 		sbuf_delete(buf);
4273 		return (ENOMEM);
4274 	}
4275 
4276 	sbuf_cat(buf, "\n");
4277 	/* Assuming <= 255 elements in switch */
4278 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4279 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4280 	/* Exclude:
4281 	** Revision -- all elements are revision 1 for now
4282 	*/
4283 	sbuf_printf(buf,
4284 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4285 	    "                |          |          | (uplink)\n");
4286 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4287 		// "%4d (%8s) | %8s   %8s   %#8x",
4288 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4289 		sbuf_cat(buf, " ");
4290 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4291 		    &sw_config->element[i]));
4292 		sbuf_cat(buf, " | ");
4293 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4294 		sbuf_cat(buf, "   ");
4295 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4296 		sbuf_cat(buf, "   ");
4297 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4298 		if (i < sw_config->header.num_reported - 1)
4299 			sbuf_cat(buf, "\n");
4300 	}
4301 	sbuf_delete(nmbuf);
4302 
4303 	error = sbuf_finish(buf);
4304 	if (error)
4305 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4306 
4307 	sbuf_delete(buf);
4308 
4309 	return (error);
4310 }
4311 
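/*
 * Dump the RSS hash key, read via the admin queue on X722 devices or
 * from the I40E_PFQF_HKEY registers otherwise.
 */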
4312 static int
4313 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4314 {
4315 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4316 	struct i40e_hw *hw = &pf->hw;
4317 	device_t dev = pf->dev;
4318 	struct sbuf *buf;
4319 	int error = 0;
4320 	enum i40e_status_code status;
4321 	u32 reg;
4322 
4323 	struct i40e_aqc_get_set_rss_key_data key_data;
4324 
4325 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4326 	if (!buf) {
4327 		device_printf(dev, "Could not allocate sbuf for output.\n");
4328 		return (ENOMEM);
4329 	}
4330 
4331 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4332 
4333 	sbuf_cat(buf, "\n");
4334 	if (hw->mac.type == I40E_MAC_X722) {
4335 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4336 		if (status)
4337 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4338 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4339 	} else {
4340 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4341 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4342 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4343 		}
4344 	}
4345 
4346 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4347 
4348 	error = sbuf_finish(buf);
4349 	if (error)
4350 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4351 	sbuf_delete(buf);
4352 
4353 	return (error);
4354 }
4355 
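/*
 * Hex-dump "length" bytes of "buf" into the sbuf, 16 per line, each line
 * prefixed with its offset and, when "text" is set, followed by the
 * printable ASCII. Example line (illustrative data):
 *	  16 | 49 58 4c 20 64 72 69 76 65 72 00 00 00 00 00 00 IXL driver......
 */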
4356 static void
4357 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4358 {
4359 	int i, j, k, width;
4360 	char c;
4361 
4362 	if (length < 1 || buf == NULL) return;
4363 
4364 	int byte_stride = 16;
4365 	int lines = length / byte_stride;
4366 	int rem = length % byte_stride;
4367 	if (rem > 0)
4368 		lines++;
4369 
4370 	for (i = 0; i < lines; i++) {
4371 		width = (rem > 0 && i == lines - 1)
4372 		    ? rem : byte_stride;
4373 
4374 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4375 
4376 		for (j = 0; j < width; j++)
4377 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4378 
4379 		if (width < byte_stride) {
4380 			for (k = 0; k < (byte_stride - width); k++)
4381 				sbuf_printf(sb, "   ");
4382 		}
4383 
4384 		if (!text) {
4385 			sbuf_printf(sb, "\n");
4386 			continue;
4387 		}
4388 
4389 		for (j = 0; j < width; j++) {
4390 			c = (char)buf[i * byte_stride + j];
4391 			if (c < 32 || c > 126)
4392 				sbuf_printf(sb, ".");
4393 			else
4394 				sbuf_printf(sb, "%c", c);
4395 
4396 			if (j == width - 1)
4397 				sbuf_printf(sb, "\n");
4398 		}
4399 	}
4400 }
4401 
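/*
 * Dump the RSS lookup table, read via the admin queue on X722 devices
 * or from the I40E_PFQF_HLUT registers otherwise.
 */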
4402 static int
4403 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4404 {
4405 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4406 	struct i40e_hw *hw = &pf->hw;
4407 	device_t dev = pf->dev;
4408 	struct sbuf *buf;
4409 	int error = 0;
4410 	enum i40e_status_code status;
4411 	u8 hlut[512];
4412 	u32 reg;
4413 
4414 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4415 	if (!buf) {
4416 		device_printf(dev, "Could not allocate sbuf for output.\n");
4417 		return (ENOMEM);
4418 	}
4419 
4420 	bzero(hlut, sizeof(hlut));
4421 	sbuf_cat(buf, "\n");
4422 	if (hw->mac.type == I40E_MAC_X722) {
4423 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4424 		if (status)
4425 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4426 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4427 	} else {
4428 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4429 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4430 			bcopy(&reg, &hlut[i << 2], 4);
4431 		}
4432 	}
4433 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4434 
4435 	error = sbuf_finish(buf);
4436 	if (error)
4437 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4438 	sbuf_delete(buf);
4439 
4440 	return (error);
4441 }
4442 
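/*
 * Report the 64-bit RSS Hash Enable (HENA) mask, assembled from the two
 * 32-bit I40E_PFQF_HENA registers.
 */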
4443 static int
4444 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4445 {
4446 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4447 	struct i40e_hw *hw = &pf->hw;
4448 	u64 hena;
4449 
4450 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4451 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4452 
4453 	return sysctl_handle_long(oidp, NULL, hena, req);
4454 }
4455 
4456 /*
4457  * Sysctl to disable firmware's link management
4458  *
4459  * 1 - Disable link management on this port
4460  * 0 - Re-enable link management
4461  *
4462  * On normal NVMs, firmware manages link by default.
4463  */
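/*
 * Example (node name illustrative):
 *	sysctl dev.ixl.0.debug.disable_fw_link_management=1
 */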
4464 static int
4465 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4466 {
4467 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4468 	struct i40e_hw *hw = &pf->hw;
4469 	device_t dev = pf->dev;
4470 	int requested_mode = -1;
4471 	enum i40e_status_code status = 0;
4472 	int error = 0;
4473 
4474 	/* Read in new mode */
4475 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4476 	if ((error) || (req->newptr == NULL))
4477 		return (error);
4478 	/* Check for sane value */
4479 	if (requested_mode < 0 || requested_mode > 1) {
4480 		device_printf(dev, "Valid modes are 0 or 1\n");
4481 		return (EINVAL);
4482 	}
4483 
4484 	/* Set new mode */
4485 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4486 	if (status) {
4487 		device_printf(dev,
4488 		    "%s: Error setting new phy debug mode %s,"
4489 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4490 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4491 		return (EIO);
4492 	}
4493 
4494 	return (0);
4495 }
4496 
4497 /*
4498  * Read some diagnostic data from an SFP module
4499  * Bytes 96-99, 102-105 from device address 0xA2
4500  */
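/*
 * Per SFF-8472, bytes 96-99 of the 0xA2 page hold the temperature and
 * Vcc monitors, and bytes 102-105 hold the TX and RX power monitors.
 */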
4501 static int
4502 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4503 {
4504 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 	device_t dev = pf->dev;
4506 	struct sbuf *sbuf;
4507 	int error = 0;
4508 	u8 output;
4509 
4510 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4511 	if (error) {
4512 		device_printf(dev, "Error reading from i2c\n");
4513 		return (error);
4514 	}
4515 	if (output != 0x3) {
4516 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4517 		return (EIO);
4518 	}
4519 
4520 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4521 	if (!(output & 0x60)) {
4522 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4523 		return (EIO);
4524 	}
4525 
4526 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4527 
4528 	for (u8 offset = 96; offset < 100; offset++) {
4529 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4530 		sbuf_printf(sbuf, "%02X ", output);
4531 	}
4532 	for (u8 offset = 102; offset < 106; offset++) {
4533 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4534 		sbuf_printf(sbuf, "%02X ", output);
4535 	}
4536 
4537 	sbuf_finish(sbuf);
4538 	sbuf_delete(sbuf);
4539 
4540 	return (0);
4541 }
4542 
4543 /*
4544  * Sysctl to read a byte from I2C bus.
4545  *
4546  * Input: 32-bit value:
4547  * 	bits 0-7:   device address (0xA0 or 0xA2)
4548  * 	bits 8-15:  offset (0-255)
4549  *	bits 16-31: unused
4550  * Output: 8-bit value read
4551  */
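/*
 * Example: reading offset 96 from device 0xA2 encodes as
 * (96 << 8) | 0xA2 = 0x60A2, so (node name illustrative):
 *	sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 */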
4552 static int
4553 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4554 {
4555 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4556 	device_t dev = pf->dev;
4557 	int input = -1, error = 0;
4558 	u8 dev_addr, offset, output;
4559 
4560 	/* Read in I2C read parameters */
4561 	error = sysctl_handle_int(oidp, &input, 0, req);
4562 	if ((error) || (req->newptr == NULL))
4563 		return (error);
4564 	/* Validate device address */
4565 	dev_addr = input & 0xFF;
4566 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4567 		return (EINVAL);
4568 	}
4569 	offset = (input >> 8) & 0xFF;
4570 
4571 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4572 	if (error)
4573 		return (error);
4574 
4575 	device_printf(dev, "%02X\n", output);
4576 	return (0);
4577 }
4578 
4579 /*
4580  * Sysctl to write a byte to the I2C bus.
4581  *
4582  * Input: 32-bit value:
4583  * 	bits 0-7:   device address (0xA0 or 0xA2)
4584  * 	bits 8-15:  offset (0-255)
4585  *	bits 16-23: value to write
4586  *	bits 24-31: unused
4587  * Output: 8-bit value written
4588  */
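/*
 * Example: writing 0x5A to offset 123 (0x7B) on device 0xA2 encodes as
 * (0x5A << 16) | (0x7B << 8) | 0xA2 = 0x5A7BA2.
 */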
4589 static int
4590 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4591 {
4592 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4593 	device_t dev = pf->dev;
4594 	int input = -1, error = 0;
4595 	u8 dev_addr, offset, value;
4596 
4597 	/* Read in I2C write parameters */
4598 	error = sysctl_handle_int(oidp, &input, 0, req);
4599 	if ((error) || (req->newptr == NULL))
4600 		return (error);
4601 	/* Validate device address */
4602 	dev_addr = input & 0xFF;
4603 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4604 		return (EINVAL);
4605 	}
4606 	offset = (input >> 8) & 0xFF;
4607 	value = (input >> 16) & 0xFF;
4608 
4609 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4610 	if (error)
4611 		return (error);
4612 
4613 	device_printf(dev, "%02X written\n", value);
4614 	return (0);
4615 }
4616 
4617 static int
4618 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4619     u8 bit_pos, int *is_set)
4620 {
4621 	device_t dev = pf->dev;
4622 	struct i40e_hw *hw = &pf->hw;
4623 	enum i40e_status_code status;
4624 
4625 	status = i40e_aq_get_phy_capabilities(hw,
4626 	    FALSE, FALSE, abilities, NULL);
4627 	if (status) {
4628 		device_printf(dev,
4629 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4630 		    __func__, i40e_stat_str(hw, status),
4631 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4632 		return (EIO);
4633 	}
4634 
4635 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4636 	return (0);
4637 }
4638 
4639 static int
4640 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4641     u8 bit_pos, int set)
4642 {
4643 	device_t dev = pf->dev;
4644 	struct i40e_hw *hw = &pf->hw;
4645 	struct i40e_aq_set_phy_config config;
4646 	enum i40e_status_code status;
4647 
4648 	/* Set new PHY config */
4649 	memset(&config, 0, sizeof(config));
4650 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4651 	if (set)
4652 		config.fec_config |= bit_pos;
4653 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4654 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4655 		config.phy_type = abilities->phy_type;
4656 		config.phy_type_ext = abilities->phy_type_ext;
4657 		config.link_speed = abilities->link_speed;
4658 		config.eee_capability = abilities->eee_capability;
4659 		config.eeer = abilities->eeer_val;
4660 		config.low_power_ctrl = abilities->d3_lpan;
4661 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4662 
4663 		if (status) {
4664 			device_printf(dev,
4665 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4666 			    __func__, i40e_stat_str(hw, status),
4667 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4668 			return (EIO);
4669 		}
4670 	}
4671 
4672 	return (0);
4673 }
4674 
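/*
 * The FEC sysctl handlers below share one pattern: read the relevant
 * ability/request bit from the current PHY abilities, expose it through
 * sysctl_handle_int(), and apply any change via ixl_set_fec_config().
 * Example (node name illustrative):
 *	sysctl dev.ixl.0.fec.rs_requested=1	# request RS-FEC
 */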
4675 static int
4676 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4677 {
4678 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4679 	int mode, error = 0;
4680 
4681 	struct i40e_aq_get_phy_abilities_resp abilities;
4682 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4683 	if (error)
4684 		return (error);
4685 	/* Read in new mode */
4686 	error = sysctl_handle_int(oidp, &mode, 0, req);
4687 	if ((error) || (req->newptr == NULL))
4688 		return (error);
4689 
4690 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4691 }
4692 
4693 static int
4694 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4695 {
4696 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4697 	int mode, error = 0;
4698 
4699 	struct i40e_aq_get_phy_abilities_resp abilities;
4700 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4701 	if (error)
4702 		return (error);
4703 	/* Read in new mode */
4704 	error = sysctl_handle_int(oidp, &mode, 0, req);
4705 	if ((error) || (req->newptr == NULL))
4706 		return (error);
4707 
4708 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4709 }
4710 
4711 static int
4712 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4713 {
4714 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4715 	int mode, error = 0;
4716 
4717 	struct i40e_aq_get_phy_abilities_resp abilities;
4718 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4719 	if (error)
4720 		return (error);
4721 	/* Read in new mode */
4722 	error = sysctl_handle_int(oidp, &mode, 0, req);
4723 	if ((error) || (req->newptr == NULL))
4724 		return (error);
4725 
4726 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4727 }
4728 
4729 static int
4730 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4731 {
4732 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4733 	int mode, error = 0;
4734 
4735 	struct i40e_aq_get_phy_abilities_resp abilities;
4736 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4737 	if (error)
4738 		return (error);
4739 	/* Read in new mode */
4740 	error = sysctl_handle_int(oidp, &mode, 0, req);
4741 	if ((error) || (req->newptr == NULL))
4742 		return (error);
4743 
4744 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4745 }
4746 
4747 static int
4748 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4749 {
4750 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4751 	int mode, error = 0;
4752 
4753 	struct i40e_aq_get_phy_abilities_resp abilities;
4754 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4755 	if (error)
4756 		return (error);
4757 	/* Read in new mode */
4758 	error = sysctl_handle_int(oidp, &mode, 0, req);
4759 	if ((error) || (req->newptr == NULL))
4760 		return (error);
4761 
4762 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4763 }
4764 
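/*
 * Dump internal firmware debug data (cluster 1) table by table using
 * the Admin Queue debug dump command.
 */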
4765 static int
4766 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4767 {
4768 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4769 	struct i40e_hw *hw = &pf->hw;
4770 	device_t dev = pf->dev;
4771 	struct sbuf *buf;
4772 	int error = 0;
4773 	enum i40e_status_code status;
4774 
4775 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4776 	if (!buf) {
4777 		device_printf(dev, "Could not allocate sbuf for output.\n");
4778 		return (ENOMEM);
4779 	}
4780 
4781 	u8 *final_buff;
4782 	/* This amount is only necessary if reading the entire cluster into memory */
4783 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4784 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4785 	if (final_buff == NULL) {
4786 		device_printf(dev, "Could not allocate memory for output.\n");
4787 		goto out;
4788 	}
4789 	int final_buff_len = 0;
4790 
4791 	u8 cluster_id = 1;
4792 	bool more = true;
4793 
4794 	u8 dump_buf[4096];
4795 	u16 curr_buff_size = 4096;
4796 	u8 curr_next_table = 0;
4797 	u32 curr_next_index = 0;
4798 
4799 	u16 ret_buff_size;
4800 	u8 ret_next_table;
4801 	u32 ret_next_index;
4802 
4803 	sbuf_cat(buf, "\n");
4804 
4805 	while (more) {
4806 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4807 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4808 		if (status) {
4809 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4810 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4811 			goto free_out;
4812 		}
4813 
4814 		/* copy info out of temp buffer */
4815 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4816 		final_buff_len += ret_buff_size;
4817 
4818 		if (ret_next_table != curr_next_table) {
4819 			/* We're done with the current table; we can dump out read data. */
4820 			sbuf_printf(buf, "%d:", curr_next_table);
4821 			int bytes_printed = 0;
4822 			while (bytes_printed < final_buff_len) {
4823 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4824 				bytes_printed += 16;
4825 			}
4826 			sbuf_cat(buf, "\n");
4827 
4828 			/* The entire cluster has been read; we're finished */
4829 			if (ret_next_table == 0xFF)
4830 				break;
4831 
4832 			/* Otherwise clear the output buffer and continue reading */
4833 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4834 			final_buff_len = 0;
4835 		}
4836 
4837 		if (ret_next_index == 0xFFFFFFFF)
4838 			ret_next_index = 0;
4839 
4840 		bzero(dump_buf, sizeof(dump_buf));
4841 		curr_next_table = ret_next_table;
4842 		curr_next_index = ret_next_index;
4843 	}
4844 
4845 free_out:
4846 	free(final_buff, M_DEVBUF);
4847 out:
4848 	error = sbuf_finish(buf);
4849 	if (error)
4850 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4851 	sbuf_delete(buf);
4852 
4853 	return (error);
4854 }
4855 
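/*
 * Control the firmware LLDP agent: write 0 to stop it (and let the host
 * handle LLDP), 1 to restart it. Example (node name illustrative):
 *	sysctl dev.ixl.0.fw_lldp=0
 */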
4856 static int
4857 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4858 {
4859 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4860 	struct i40e_hw *hw = &pf->hw;
4861 	device_t dev = pf->dev;
4862 	int error = 0;
4863 	int state, new_state;
4864 	enum i40e_status_code status;
4865 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4866 
4867 	/* Read in new mode */
4868 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4869 	if ((error) || (req->newptr == NULL))
4870 		return (error);
4871 
4872 	/* Already in requested state */
4873 	if (new_state == state)
4874 		return (error);
4875 
4876 	if (new_state == 0) {
4877 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4878 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4879 			return (EINVAL);
4880 		}
4881 
4882 		if (pf->hw.aq.api_maj_ver < 1 ||
4883 		    (pf->hw.aq.api_maj_ver == 1 &&
4884 		    pf->hw.aq.api_min_ver < 7)) {
4885 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4886 			return (EINVAL);
4887 		}
4888 
4889 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4890 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4891 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4892 	} else {
4893 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4894 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4895 			device_printf(dev, "FW LLDP agent is already running\n");
4896 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4897 	}
4898 
4899 	return (0);
4900 }
4901 
4902 /*
4903  * Get FW LLDP Agent status
4904  */
4905 int
4906 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4907 {
4908 	enum i40e_status_code ret = I40E_SUCCESS;
4909 	struct i40e_lldp_variables lldp_cfg;
4910 	struct i40e_hw *hw = &pf->hw;
4911 	u8 adminstatus = 0;
4912 
4913 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4914 	if (ret)
4915 		return ret;
4916 
4917 	/* Get the LLDP AdminStatus for the current port */
4918 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4919 	adminstatus &= 0xf;
4920 
4921 	/* Check if LLDP agent is disabled */
4922 	if (!adminstatus) {
4923 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4924 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4925 	} else
4926 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4927 
4928 	return (0);
4929 }
4930 
4931 int
4932 ixl_attach_get_link_status(struct ixl_pf *pf)
4933 {
4934 	struct i40e_hw *hw = &pf->hw;
4935 	device_t dev = pf->dev;
4936 	int error = 0;
4937 
4938 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4939 	    (hw->aq.fw_maj_ver < 4)) {
4940 		i40e_msec_delay(75);
4941 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4942 		if (error) {
4943 			device_printf(dev, "link restart failed, aq_err=%d\n",
4944 			    pf->hw.aq.asq_last_status);
4945 			return (error);
4946 		}
4947 	}
4948 
4949 	/* Determine link state */
4950 	hw->phy.get_link_info = TRUE;
4951 	i40e_get_link_status(hw, &pf->link_up);
4952 	return (0);
4953 }
4954 
4955 static int
4956 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4957 {
4958 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4959 	int requested = 0, error = 0;
4960 
4961 	/* Read in new mode */
4962 	error = sysctl_handle_int(oidp, &requested, 0, req);
4963 	if ((error) || (req->newptr == NULL))
4964 		return (error);
4965 
4966 	/* Initiate the PF reset later in the admin task */
4967 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4968 
4969 	return (error);
4970 }
4971 
4972 static int
4973 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4974 {
4975 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4976 	struct i40e_hw *hw = &pf->hw;
4977 	int requested = 0, error = 0;
4978 
4979 	/* Read in new mode */
4980 	error = sysctl_handle_int(oidp, &requested, 0, req);
4981 	if ((error) || (req->newptr == NULL))
4982 		return (error);
4983 
4984 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4985 
4986 	return (error);
4987 }
4988 
4989 static int
4990 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4991 {
4992 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4993 	struct i40e_hw *hw = &pf->hw;
4994 	int requested = 0, error = 0;
4995 
4996 	/* Read in new mode */
4997 	error = sysctl_handle_int(oidp, &requested, 0, req);
4998 	if ((error) || (req->newptr == NULL))
4999 		return (error);
5000 
5001 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5002 
5003 	return (error);
5004 }
5005 
5006 static int
5007 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5008 {
5009 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5010 	struct i40e_hw *hw = &pf->hw;
5011 	int requested = 0, error = 0;
5012 
5013 	/* Read in new mode */
5014 	error = sysctl_handle_int(oidp, &requested, 0, req);
5015 	if ((error) || (req->newptr == NULL))
5016 		return (error);
5017 
5018 	/* TODO: Find out how to bypass this */
5019 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5020 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5021 		error = EINVAL;
5022 	} else
5023 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5024 
5025 	return (error);
5026 }
5027 
5028 /*
5029  * Print out mapping of TX queue indexes and Rx queue indexes
5030  * to MSI-X vectors.
5031  */
5032 static int
5033 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5034 {
5035 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5036 	struct ixl_vsi *vsi = &pf->vsi;
5037 	device_t dev = pf->dev;
5038 	struct sbuf *buf;
5039 	int error = 0;
5040 
5041 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5042 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5043 
5044 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5045 	if (!buf) {
5046 		device_printf(dev, "Could not allocate sbuf for output.\n");
5047 		return (ENOMEM);
5048 	}
5049 
5050 	sbuf_cat(buf, "\n");
5051 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5052 		rx_que = &vsi->rx_queues[i];
5053 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5054 	}
5055 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5056 		tx_que = &vsi->tx_queues[i];
5057 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5058 	}
5059 
5060 	error = sbuf_finish(buf);
5061 	if (error)
5062 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5063 	sbuf_delete(buf);
5064 
5065 	return (error);
5066 }
5067