xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision 4e462178745853ecc014c13f82f89cfe39b83e9c)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 
50 /* Sysctls */
51 static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
59 
60 /* Debug Sysctls */
61 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
85 #ifdef IXL_DEBUG
86 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
88 #endif
89 
90 #ifdef IXL_IW
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
93 #endif
94 
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96     {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
97 
98 const char * const ixl_fc_string[6] = {
99 	"None",
100 	"Rx",
101 	"Tx",
102 	"Full",
103 	"Priority",
104 	"Default"
105 };
106 
107 static char *ixl_fec_string[3] = {
108 	"CL108 RS-FEC",
109 	"CL74 FC-FEC/BASE-R",
110 	"None"
111 };
112 
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
114 
115 /*
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
117 */
118 void
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
120 {
121 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
124 
125 	sbuf_printf(buf,
126 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
129 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 	    IXL_NVM_VERSION_HI_SHIFT,
131 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 	    IXL_NVM_VERSION_LO_SHIFT,
133 	    hw->nvm.eetrack,
134 	    oem_ver, oem_build, oem_patch);
135 }
136 
137 void
138 ixl_print_nvm_version(struct ixl_pf *pf)
139 {
140 	struct i40e_hw *hw = &pf->hw;
141 	device_t dev = pf->dev;
142 	struct sbuf *sbuf;
143 
144 	sbuf = sbuf_new_auto();
145 	ixl_nvm_version_str(hw, sbuf);
146 	sbuf_finish(sbuf);
147 	device_printf(dev, "%s\n", sbuf_data(sbuf));
148 	sbuf_delete(sbuf);
149 }
150 
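/*
 * Push the PF's configured Tx ITR value into every Tx queue's ITR
 * register and cache it in the ring state. ixl_configure_rx_itr()
 * below mirrors this for the Rx side.
 */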
151 static void
152 ixl_configure_tx_itr(struct ixl_pf *pf)
153 {
154 	struct i40e_hw		*hw = &pf->hw;
155 	struct ixl_vsi		*vsi = &pf->vsi;
156 	struct ixl_tx_queue	*que = vsi->tx_queues;
157 
158 	vsi->tx_itr_setting = pf->tx_itr;
159 
160 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 		struct tx_ring	*txr = &que->txr;
162 
163 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 		    vsi->tx_itr_setting);
165 		txr->itr = vsi->tx_itr_setting;
166 		txr->latency = IXL_AVE_LATENCY;
167 	}
168 }
169 
170 static void
171 ixl_configure_rx_itr(struct ixl_pf *pf)
172 {
173 	struct i40e_hw		*hw = &pf->hw;
174 	struct ixl_vsi		*vsi = &pf->vsi;
175 	struct ixl_rx_queue	*que = vsi->rx_queues;
176 
177 	vsi->rx_itr_setting = pf->rx_itr;
178 
179 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 		struct rx_ring 	*rxr = &que->rxr;
181 
182 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 		    vsi->rx_itr_setting);
184 		rxr->itr = vsi->rx_itr_setting;
185 		rxr->latency = IXL_AVE_LATENCY;
186 	}
187 }
188 
189 /*
190  * Write PF ITR values to queue ITR registers.
191  */
192 void
193 ixl_configure_itr(struct ixl_pf *pf)
194 {
195 	ixl_configure_tx_itr(pf);
196 	ixl_configure_rx_itr(pf);
197 }
198 
199 /*********************************************************************
200  *
201  *  Get the hardware capabilities
202  *
203  **********************************************************************/
204 
205 int
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
207 {
208 	struct i40e_aqc_list_capabilities_element_resp *buf;
209 	struct i40e_hw	*hw = &pf->hw;
210 	device_t 	dev = pf->dev;
211 	enum i40e_status_code status;
212 	int len, i2c_intfc_num;
213 	bool again = TRUE;
214 	u16 needed;
215 
216 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
217 retry:
218 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 		device_printf(dev, "Unable to allocate cap memory\n");
221 		return (ENOMEM);
222 	}
223 
224 	/* This populates the hw struct */
225 	status = i40e_aq_discover_capabilities(hw, buf, len,
226 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
227 	free(buf, M_DEVBUF);
228 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
229 	    (again == TRUE)) {
230 		/* retry once with a larger buffer */
231 		again = FALSE;
232 		len = needed;
233 		goto retry;
234 	} else if (status != I40E_SUCCESS) {
235 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
237 		return (ENODEV);
238 	}
239 
240 	/*
241 	 * Some devices have both MDIO and I2C; since this isn't reported
242 	 * by the FW, check registers to see if an I2C interface exists.
243 	 */
244 	i2c_intfc_num = ixl_find_i2c_interface(pf);
245 	if (i2c_intfc_num != -1)
246 		pf->has_i2c = true;
247 
248 	/* Determine functions to use for driver I2C accesses */
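	/*
	 * Mapping, as handled below: 0 auto-selects (AQ on XL710 with FW API
	 * 1.7+, otherwise register access), 1 = bit-bang, 2 = register
	 * access, 3 = admin queue.
	 */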
249 	switch (pf->i2c_access_method) {
250 	case 0: {
251 		if (hw->mac.type == I40E_MAC_XL710 &&
252 		    hw->aq.api_maj_ver == 1 &&
253 		    hw->aq.api_min_ver >= 7) {
254 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
256 		} else {
257 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
259 		}
260 		break;
261 	}
262 	case 3:
263 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
265 		break;
266 	case 2:
267 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
269 		break;
270 	case 1:
271 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
273 		break;
274 	default:
275 		/* Should not happen */
276 		device_printf(dev, "Error setting I2C access functions\n");
277 		break;
278 	}
279 
280 	/* Print a subset of the capability information. */
281 	device_printf(dev,
282 	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
288 	    "MDIO shared");
289 
290 	return (0);
291 }
292 
293 /* For the set_advertise sysctl */
294 void
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
296 {
297 	device_t dev = pf->dev;
298 	int err;
299 
300 	/* Make sure to initialize the device to the complete list of
301 	 * supported speeds on driver load, to ensure unloading and
302 	 * reloading the driver will restore this value.
303 	 */
304 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
305 	if (err) {
306 		/* Non-fatal error */
307 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
308 			      __func__, err);
309 		return;
310 	}
311 
312 	pf->advertised_speed =
313 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
314 }
315 
316 int
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
318 {
319 	enum i40e_status_code status = 0;
320 	struct i40e_hw *hw = &pf->hw;
321 	device_t dev = pf->dev;
322 
323 	/* Shutdown LAN HMC */
324 	if (hw->hmc.hmc_obj) {
325 		status = i40e_shutdown_lan_hmc(hw);
326 		if (status) {
327 			device_printf(dev,
328 			    "init: LAN HMC shutdown failure; status %s\n",
329 			    i40e_stat_str(hw, status));
330 			goto err_out;
331 		}
332 	}
333 
334 	/* Shutdown admin queue */
335 	ixl_disable_intr0(hw);
336 	status = i40e_shutdown_adminq(hw);
337 	if (status)
338 		device_printf(dev,
339 		    "init: Admin Queue shutdown failure; status %s\n",
340 		    i40e_stat_str(hw, status));
341 
342 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
343 err_out:
344 	return (status);
345 }
346 
347 int
348 ixl_reset(struct ixl_pf *pf)
349 {
350 	struct i40e_hw *hw = &pf->hw;
351 	device_t dev = pf->dev;
352 	u32 reg;
353 	int error = 0;
354 
355 	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
356 	i40e_clear_hw(hw);
357 	error = i40e_pf_reset(hw);
358 	if (error) {
359 		device_printf(dev, "init: PF reset failure\n");
360 		error = EIO;
361 		goto err_out;
362 	}
363 
364 	error = i40e_init_adminq(hw);
365 	if (error) {
366 		device_printf(dev, "init: Admin queue init failure;"
367 		    " status code %d\n", error);
368 		error = EIO;
369 		goto err_out;
370 	}
371 
372 	i40e_clear_pxe_mode(hw);
373 
374 #if 0
375 	error = ixl_get_hw_capabilities(pf);
376 	if (error) {
377 		device_printf(dev, "init: Error retrieving HW capabilities;"
378 		    " status code %d\n", error);
379 		goto err_out;
380 	}
381 
382 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 	    hw->func_caps.num_rx_qp, 0, 0);
384 	if (error) {
385 		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
386 		    error);
387 		error = EIO;
388 		goto err_out;
389 	}
390 
391 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
392 	if (error) {
393 		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
394 		    error);
395 		error = EIO;
396 		goto err_out;
397 	}
398 
399 	// XXX: possible fix for panic, but our failure recovery is still broken
400 	error = ixl_switch_config(pf);
401 	if (error) {
402 		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
403 		     error);
404 		goto err_out;
405 	}
406 
407 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
408 	    NULL);
409         if (error) {
410 		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 		    " aq_err %d\n", error, hw->aq.asq_last_status);
412 		error = EIO;
413 		goto err_out;
414 	}
415 
416 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
417 	if (error) {
418 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
420 		goto err_out;
421 	}
422 
423 	// XXX: (Rebuild VSIs?)
424 
425 	/* Firmware delay workaround */
426 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 	    (hw->aq.fw_maj_ver < 4)) {
428 		i40e_msec_delay(75);
429 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
430 		if (error) {
431 			device_printf(dev, "init: link restart failed, aq_err %d\n",
432 			    hw->aq.asq_last_status);
433 			goto err_out;
434 		}
435 	}
436 
437 
438 	/* Re-enable admin queue interrupt */
439 	if (pf->msix > 1) {
440 		ixl_configure_intr0_msix(pf);
441 		ixl_enable_intr0(hw);
442 	}
443 
444 err_out:
445 	return (error);
446 #endif
447 	ixl_rebuild_hw_structs_after_reset(pf);
448 
449 	/* The PF reset should have cleared any critical errors */
450 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
452 
453 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 	reg |= IXL_ICR0_CRIT_ERR_MASK;
455 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
456 
457 err_out:
458 	return (error);
459 }
460 
461 /*
462  * TODO: Make sure this properly handles admin queue / single rx queue intr
463  */
464 int
465 ixl_intr(void *arg)
466 {
467 	struct ixl_pf		*pf = arg;
468 	struct i40e_hw		*hw =  &pf->hw;
469 	struct ixl_vsi		*vsi = &pf->vsi;
470 	struct ixl_rx_queue	*que = vsi->rx_queues;
471 	u32			icr0;
472 
473 	// pf->admin_irq++
474 	++que->irqs;
475 
476 // TODO: Check against proper field
477 #if 0
478 	/* Clear PBA at start of ISR if using legacy interrupts */
479 	if (pf->msix == 0)
480 		wr32(hw, I40E_PFINT_DYN_CTL0,
481 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
483 #endif
484 
485 	icr0 = rd32(hw, I40E_PFINT_ICR0);
486 
488 #ifdef PCI_IOV
489 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 		iflib_iov_intr_deferred(vsi->ctx);
491 #endif
492 
493 	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 		iflib_admin_intr_deferred(vsi->ctx);
496 
497 	// TODO: Is intr0 enabled somewhere else?
498 	ixl_enable_intr0(hw);
499 
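	/* Only schedule the RX queue task if queue 0 raised the interrupt */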
500 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 		return (FILTER_SCHEDULE_THREAD);
502 	else
503 		return (FILTER_HANDLED);
504 }
505 
506 
507 /*********************************************************************
508  *
509  *  MSI-X VSI Interrupt Service routine
510  *
511  **********************************************************************/
512 int
513 ixl_msix_que(void *arg)
514 {
515 	struct ixl_rx_queue *rx_que = arg;
516 
517 	++rx_que->irqs;
518 
519 	ixl_set_queue_rx_itr(rx_que);
520 	// ixl_set_queue_tx_itr(que);
521 
522 	return (FILTER_SCHEDULE_THREAD);
523 }
524 
525 
526 /*********************************************************************
527  *
528  *  MSI-X Admin Queue Interrupt Service routine
529  *
530  **********************************************************************/
531 int
532 ixl_msix_adminq(void *arg)
533 {
534 	struct ixl_pf	*pf = arg;
535 	struct i40e_hw	*hw = &pf->hw;
536 	device_t	dev = pf->dev;
537 	u32		reg, mask, rstat_reg;
538 	bool		do_task = FALSE;
539 
540 	DDPRINTF(dev, "begin");
541 
542 	++pf->admin_irq;
543 
544 	reg = rd32(hw, I40E_PFINT_ICR0);
545 	/*
546 	 * For masking off interrupt causes that need to be handled before
547 	 * they can be re-enabled
548 	 */
549 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
550 
551 	/* Check on the cause */
552 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
554 		do_task = TRUE;
555 	}
556 
557 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
560 		do_task = TRUE;
561 	}
562 
563 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 		device_printf(dev, "Reset Requested!\n");
566 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 		device_printf(dev, "Reset type: ");
570 		switch (rstat_reg) {
571 		/* These others might be handled similarly to an EMPR reset */
572 		case I40E_RESET_CORER:
573 			printf("CORER\n");
574 			break;
575 		case I40E_RESET_GLOBR:
576 			printf("GLOBR\n");
577 			break;
578 		case I40E_RESET_EMPR:
579 			printf("EMPR\n");
580 			break;
581 		default:
582 			printf("POR\n");
583 			break;
584 		}
585 		/* overload admin queue task to check reset progress */
586 		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
587 		do_task = TRUE;
588 	}
589 
590 	/*
591 	 * PE / PCI / ECC exceptions are all handled in the same way:
592 	 * mask out these three causes, then request a PF reset
593 	 *
594 	 * TODO: I think at least ECC error requires a GLOBR, not PFR
595 	 */
596 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 		device_printf(dev, "ECC Error detected!\n");
598 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 		device_printf(dev, "PCI Exception detected!\n");
600 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 	/* Checks against the conditions above */
603 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 		atomic_set_32(&pf->state,
606 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
607 		do_task = TRUE;
608 	}
609 
610 	// TODO: Linux driver never re-enables this interrupt once it has been detected
611 	// Then what is supposed to happen? A PF reset? Should it never happen?
612 	// TODO: Parse out this error into something human readable
613 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 			device_printf(dev, "HMC Error detected!\n");
617 			device_printf(dev, "INFO 0x%08x\n", reg);
618 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 			device_printf(dev, "DATA 0x%08x\n", reg);
620 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
621 		}
622 	}
623 
624 #ifdef PCI_IOV
625 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 		iflib_iov_intr_deferred(pf->vsi.ctx);
628 	}
629 #endif
630 
631 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 	ixl_enable_intr0(hw);
633 
634 	if (do_task)
635 		return (FILTER_SCHEDULE_THREAD);
636 	else
637 		return (FILTER_HANDLED);
638 }
639 
640 /*********************************************************************
641  * 	Filter Routines
642  *
643  *	Routines for multicast and vlan filter management.
644  *
645  *********************************************************************/
646 void
647 ixl_add_multi(struct ixl_vsi *vsi)
648 {
649 	struct	ifmultiaddr	*ifma;
650 	struct ifnet		*ifp = vsi->ifp;
651 	struct i40e_hw		*hw = vsi->hw;
652 	int			mcnt = 0, flags;
653 
654 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
655 
656 	if_maddr_rlock(ifp);
657 	/*
658 	** First just get a count, to decide if
659 	** we simply use multicast promiscuous.
660 	*/
661 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
662 		if (ifma->ifma_addr->sa_family != AF_LINK)
663 			continue;
664 		mcnt++;
665 	}
666 	if_maddr_runlock(ifp);
667 
668 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
669 		/* delete existing MC filters */
670 		ixl_del_hw_filters(vsi, mcnt);
671 		i40e_aq_set_vsi_multicast_promiscuous(hw,
672 		    vsi->seid, TRUE, NULL);
673 		return;
674 	}
675 
676 	mcnt = 0;
677 	if_maddr_rlock(ifp);
678 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
679 		if (ifma->ifma_addr->sa_family != AF_LINK)
680 			continue;
681 		ixl_add_mc_filter(vsi,
682 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
683 		mcnt++;
684 	}
685 	if_maddr_runlock(ifp);
686 	if (mcnt > 0) {
687 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
688 		ixl_add_hw_filters(vsi, flags, mcnt);
689 	}
690 
691 	IOCTL_DEBUGOUT("ixl_add_multi: end");
692 }
693 
694 int
695 ixl_del_multi(struct ixl_vsi *vsi)
696 {
697 	struct ifnet		*ifp = vsi->ifp;
698 	struct ifmultiaddr	*ifma;
699 	struct ixl_mac_filter	*f;
700 	int			mcnt = 0;
701 	bool		match = FALSE;
702 
703 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
704 
705 	/* Search for removed multicast addresses */
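	/* (Filters are only flagged IXL_FILTER_DEL here; ixl_del_hw_filters()
	 * below performs the actual removal.) */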
706 	if_maddr_rlock(ifp);
707 	SLIST_FOREACH(f, &vsi->ftl, next) {
708 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
709 			match = FALSE;
710 			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
711 				if (ifma->ifma_addr->sa_family != AF_LINK)
712 					continue;
713 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
714 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
715 					match = TRUE;
716 					break;
717 				}
718 			}
719 			if (match == FALSE) {
720 				f->flags |= IXL_FILTER_DEL;
721 				mcnt++;
722 			}
723 		}
724 	}
725 	if_maddr_runlock(ifp);
726 
727 	if (mcnt > 0)
728 		ixl_del_hw_filters(vsi, mcnt);
729 
730 	return (mcnt);
731 }
732 
733 void
734 ixl_link_up_msg(struct ixl_pf *pf)
735 {
736 	struct i40e_hw *hw = &pf->hw;
737 	struct ifnet *ifp = pf->vsi.ifp;
738 	char *req_fec_string, *neg_fec_string;
739 	u8 fec_abilities;
740 
741 	fec_abilities = hw->phy.link_info.req_fec_info;
742 	/* If both RS and KR are requested, only show RS */
743 	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
744 		req_fec_string = ixl_fec_string[0];
745 	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
746 		req_fec_string = ixl_fec_string[1];
747 	else
748 		req_fec_string = ixl_fec_string[2];
749 
750 	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
751 		neg_fec_string = ixl_fec_string[0];
752 	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
753 		neg_fec_string = ixl_fec_string[1];
754 	else
755 		neg_fec_string = ixl_fec_string[2];
756 
757 	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
758 	    ifp->if_xname,
759 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
760 	    req_fec_string, neg_fec_string,
761 	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
762 	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
763 	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
764 		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
765 		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
766 		ixl_fc_string[1] : ixl_fc_string[0]);
767 }
768 
769 /*
770  * Configure admin queue/misc interrupt cause registers in hardware.
771  */
772 void
773 ixl_configure_intr0_msix(struct ixl_pf *pf)
774 {
775 	struct i40e_hw *hw = &pf->hw;
776 	u32 reg;
777 
778 	/* First set up the adminq - vector 0 */
779 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
780 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
781 
782 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
783 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
784 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
785 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
786 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
787 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
788 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
789 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
790 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
791 
792 	/*
793 	 * 0x7FF is the end of the queue list.
794 	 * This means we won't use MSI-X vector 0 for a queue interrupt
795 	 * in MSI-X mode.
796 	 */
797 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
798 	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
799 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
800 
801 	wr32(hw, I40E_PFINT_DYN_CTL0,
802 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
803 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
804 
805 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
806 }
807 
808 /*
809  * Configure queue interrupt cause registers in hardware.
810  *
811  * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
812  */
813 void
814 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
815 {
816 	struct i40e_hw *hw = &pf->hw;
817 	struct ixl_vsi *vsi = &pf->vsi;
818 	u32		reg;
819 	u16		vector = 1;
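	/* Queue vectors start at 1; vector 0 is reserved for the admin
	 * queue (see ixl_configure_intr0_msix() above). */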
820 
821 	// TODO: See if max is really necessary
822 	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
823 		/* Make sure interrupt is disabled */
824 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
825 		/* Set linked list head to point to corresponding RX queue
826 		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
827 		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
828 		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
829 		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
830 		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
831 		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
832 
833 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
834 		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
835 		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
836 		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
837 		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
838 		wr32(hw, I40E_QINT_RQCTL(i), reg);
839 
840 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
841 		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
842 		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
843 		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
844 		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
845 		wr32(hw, I40E_QINT_TQCTL(i), reg);
846 	}
847 }
848 
849 /*
850  * Configure for single interrupt vector operation
851  */
852 void
853 ixl_configure_legacy(struct ixl_pf *pf)
854 {
855 	struct i40e_hw	*hw = &pf->hw;
856 	struct ixl_vsi	*vsi = &pf->vsi;
857 	u32 reg;
858 
859 // TODO: Fix
860 #if 0
861 	/* Configure ITR */
862 	vsi->tx_itr_setting = pf->tx_itr;
863 	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
864 	    vsi->tx_itr_setting);
865 	txr->itr = vsi->tx_itr_setting;
866 
867 	vsi->rx_itr_setting = pf->rx_itr;
868 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
869 	    vsi->rx_itr_setting);
870 	rxr->itr = vsi->rx_itr_setting;
871 	/* XXX: Assuming only 1 queue in single interrupt mode */
872 #endif
873 	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
874 
875 	/* Setup "other" causes */
876 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
877 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
878 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
879 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
880 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
881 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
882 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
883 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
885 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
886 
887 	/* No ITR for non-queue interrupts */
888 	wr32(hw, I40E_PFINT_STAT_CTL0,
889 	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
890 
891 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
892 	wr32(hw, I40E_PFINT_LNKLST0, 0);
893 
894 	/* Associate the queue pair to the vector and enable the q int */
895 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
896 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
897 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
898 	wr32(hw, I40E_QINT_RQCTL(0), reg);
899 
900 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
901 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
902 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
903 	wr32(hw, I40E_QINT_TQCTL(0), reg);
904 }
905 
906 void
907 ixl_free_pci_resources(struct ixl_pf *pf)
908 {
909 	struct ixl_vsi		*vsi = &pf->vsi;
910 	device_t		dev = iflib_get_dev(vsi->ctx);
911 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
912 
913 	/* We may get here before stations are set up */
914 	if (rx_que == NULL)
915 		goto early;
916 
917 	/*
918 	**  Release all MSI-X VSI resources:
919 	*/
920 	iflib_irq_free(vsi->ctx, &vsi->irq);
921 
922 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
923 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
924 early:
925 	if (pf->pci_mem != NULL)
926 		bus_release_resource(dev, SYS_RES_MEMORY,
927 		    rman_get_rid(pf->pci_mem), pf->pci_mem);
928 }
929 
930 void
931 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
932 {
933 	/* Display supported media types */
934 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
935 		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
936 
937 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
938 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
939 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
940 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
941 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
942 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
943 
944 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
945 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
946 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
947 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
948 
949 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
950 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
951 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
952 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
953 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
954 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
955 
956 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
957 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
958 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
959 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
960 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
961 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
962 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
963 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
964 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
965 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
966 
967 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
968 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
969 
970 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
971 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
972 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
973 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
974 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
975 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
976 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
977 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
978 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
979 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
980 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
981 
982 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
983 		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
984 
985 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
986 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
987 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
988 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
989 
990 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
991 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
992 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
993 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
994 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
995 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
996 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
997 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
998 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
999 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1000 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1001 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1002 }
1003 
1004 /*********************************************************************
1005  *
1006  *  Setup networking device structure and register an interface.
1007  *
1008  **********************************************************************/
1009 int
1010 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1011 {
1012 	struct ixl_vsi *vsi = &pf->vsi;
1013 	if_ctx_t ctx = vsi->ctx;
1014 	struct i40e_hw *hw = &pf->hw;
1015 	struct ifnet *ifp = iflib_get_ifp(ctx);
1016 	struct i40e_aq_get_phy_abilities_resp abilities;
1017 	enum i40e_status_code aq_error = 0;
1018 
1019 	INIT_DBG_DEV(dev, "begin");
1020 
1021 	vsi->shared->isc_max_frame_size =
1022 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1023 	    + ETHER_VLAN_ENCAP_LEN;
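	/* e.g. a 1500-byte MTU yields 1500 + 14 + 4 + 4 = 1522 bytes */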
1024 
1025 	aq_error = i40e_aq_get_phy_capabilities(hw,
1026 	    FALSE, TRUE, &abilities, NULL);
1027 	/* May need delay to detect fiber correctly */
1028 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1029 		/* TODO: Maybe just retry this in a task... */
1030 		i40e_msec_delay(200);
1031 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1032 		    TRUE, &abilities, NULL);
1033 	}
1034 	if (aq_error) {
1035 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1036 			device_printf(dev, "Unknown PHY type detected!\n");
1037 		else
1038 			device_printf(dev,
1039 			    "Error getting supported media types, err %d,"
1040 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1041 	} else {
1042 		pf->supported_speeds = abilities.link_speed;
1043 #if __FreeBSD_version >= 1100000
1044 		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1045 #else
1046 		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1047 #endif
1048 
1049 		ixl_add_ifmedia(vsi, hw->phy.phy_types);
1050 	}
1051 
1052 	/* Use autoselect media by default */
1053 	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1054 	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1055 
1056 	return (0);
1057 }
1058 
1059 /*
1060  * Input: bitmap of enum i40e_aq_link_speed
1061  */
1062 u64
1063 ixl_max_aq_speed_to_value(u8 link_speeds)
1064 {
1065 	if (link_speeds & I40E_LINK_SPEED_40GB)
1066 		return IF_Gbps(40);
1067 	if (link_speeds & I40E_LINK_SPEED_25GB)
1068 		return IF_Gbps(25);
1069 	if (link_speeds & I40E_LINK_SPEED_20GB)
1070 		return IF_Gbps(20);
1071 	if (link_speeds & I40E_LINK_SPEED_10GB)
1072 		return IF_Gbps(10);
1073 	if (link_speeds & I40E_LINK_SPEED_1GB)
1074 		return IF_Gbps(1);
1075 	if (link_speeds & I40E_LINK_SPEED_100MB)
1076 		return IF_Mbps(100);
1077 	else
1078 		/* Minimum supported link speed */
1079 		return IF_Mbps(100);
1080 }
1081 
1082 /*
1083 ** Run when the Admin Queue gets a link state change interrupt.
1084 */
1085 void
1086 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1087 {
1088 	struct i40e_hw *hw = &pf->hw;
1089 	device_t dev = iflib_get_dev(pf->vsi.ctx);
1090 	struct i40e_aqc_get_link_status *status =
1091 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1092 
1093 	/* Request link status from adapter */
1094 	hw->phy.get_link_info = TRUE;
1095 	i40e_get_link_status(hw, &pf->link_up);
1096 
1097 	/* Print out message if an unqualified module is found */
1098 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1099 	    (pf->advertised_speed) &&
1100 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1101 	    (!(status->link_info & I40E_AQ_LINK_UP)))
1102 		device_printf(dev, "Link failed because "
1103 		    "an unqualified module was detected!\n");
1104 
1105 	/* OS link info is updated elsewhere */
1106 }
1107 
1108 /*********************************************************************
1109  *
1110  *  Get Firmware Switch configuration
1111  *	- this will need to be more robust when more complex
1112  *	  switch configurations are enabled.
1113  *
1114  **********************************************************************/
1115 int
1116 ixl_switch_config(struct ixl_pf *pf)
1117 {
1118 	struct i40e_hw	*hw = &pf->hw;
1119 	struct ixl_vsi	*vsi = &pf->vsi;
1120 	device_t 	dev = iflib_get_dev(vsi->ctx);
1121 	struct i40e_aqc_get_switch_config_resp *sw_config;
1122 	u8	aq_buf[I40E_AQ_LARGE_BUF];
1123 	int	ret;
1124 	u16	next = 0;
1125 
1126 	memset(&aq_buf, 0, sizeof(aq_buf));
1127 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1128 	ret = i40e_aq_get_switch_config(hw, sw_config,
1129 	    sizeof(aq_buf), &next, NULL);
1130 	if (ret) {
1131 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
1132 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1133 		return (ret);
1134 	}
1135 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1136 		device_printf(dev,
1137 		    "Switch config: header reported: %d in structure, %d total\n",
1138 		    sw_config->header.num_reported, sw_config->header.num_total);
1139 		for (int i = 0; i < sw_config->header.num_reported; i++) {
1140 			device_printf(dev,
1141 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1142 			    sw_config->element[i].element_type,
1143 			    sw_config->element[i].seid,
1144 			    sw_config->element[i].uplink_seid,
1145 			    sw_config->element[i].downlink_seid);
1146 		}
1147 	}
1148 	/* Simplified due to a single VSI */
1149 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
1150 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
1151 	vsi->seid = sw_config->element[0].seid;
1152 	return (ret);
1153 }
1154 
1155 /*********************************************************************
1156  *
1157  *  Initialize the VSI:  this handles contexts, which means things
1158  *  			 like the number of descriptors and buffer size,
1159  *			 plus we init the rings through this function.
1160  *
1161  **********************************************************************/
1162 int
1163 ixl_initialize_vsi(struct ixl_vsi *vsi)
1164 {
1165 	struct ixl_pf *pf = vsi->back;
1166 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
1167 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
1168 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1169 	device_t		dev = iflib_get_dev(vsi->ctx);
1170 	struct i40e_hw		*hw = vsi->hw;
1171 	struct i40e_vsi_context	ctxt;
1172 	int 			tc_queues;
1173 	int			err = 0;
1174 
1175 	memset(&ctxt, 0, sizeof(ctxt));
1176 	ctxt.seid = vsi->seid;
1177 	if (pf->veb_seid != 0)
1178 		ctxt.uplink_seid = pf->veb_seid;
1179 	ctxt.pf_num = hw->pf_id;
1180 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1181 	if (err) {
1182 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1183 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1184 		return (err);
1185 	}
1186 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1187 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1188 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1189 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1190 	    ctxt.uplink_seid, ctxt.vsi_number,
1191 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
1192 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1193 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1194 	/*
1195 	** Set the queue and traffic class bits
1196 	**  - when multiple traffic classes are supported
1197 	**    this will need to be more robust.
1198 	*/
1199 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1200 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1201 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
1202 	ctxt.info.queue_mapping[0] = 0;
1203 	/*
1204 	 * This VSI will only use traffic class 0; start traffic class 0's
1205 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1206 	 * the driver may not use all of them).
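	 *
	 * For example, with 8 queues allocated, fls(8) - 1 == 3, so the
	 * TC0 size field is 3 and TC0 spans 2^3 == 8 queues.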
1207 	 */
1208 	tc_queues = fls(pf->qtag.num_allocated) - 1;
1209 	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1210 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1211 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1212 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1213 
1214 	/* Set VLAN receive stripping mode */
1215 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1216 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1217 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1218 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1219 	else
1220 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1221 
1222 #ifdef IXL_IW
1223 	/* Set TCP Enable for iWARP capable VSI */
1224 	if (ixl_enable_iwarp && pf->iw_enabled) {
1225 		ctxt.info.valid_sections |=
1226 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1227 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1228 	}
1229 #endif
1230 	/* Save VSI number and info for use later */
1231 	vsi->vsi_num = ctxt.vsi_number;
1232 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1233 
1234 	/* Reset VSI statistics */
1235 	ixl_vsi_reset_stats(vsi);
1236 	vsi->hw_filters_add = 0;
1237 	vsi->hw_filters_del = 0;
1238 
1239 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1240 
1241 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1242 	if (err) {
1243 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1244 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1245 		return (err);
1246 	}
1247 
1248 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1249 		struct tx_ring		*txr = &tx_que->txr;
1250 		struct i40e_hmc_obj_txq tctx;
1251 		u32			txctl;
1252 
1253 		/* Setup the HMC TX Context  */
1254 		bzero(&tctx, sizeof(tctx));
1255 		tctx.new_context = 1;
1256 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1257 		tctx.qlen = scctx->isc_ntxd[0];
1258 		tctx.fc_ena = 0;	/* Disable FCoE */
1259 		/*
1260 		 * This value needs to be pulled from the VSI that this queue
1261 		 * is assigned to. Index into the array is the traffic class.
1262 		 */
1263 		tctx.rdylist = vsi->info.qs_handle[0];
1264 		/*
1265 		 * Set these to enable Head Writeback
1266 		 * - Address is last entry in TX ring (reserved for HWB index)
1267 		 * Leave these as 0 for Descriptor Writeback
1268 		 */
1269 		if (vsi->enable_head_writeback) {
1270 			tctx.head_wb_ena = 1;
1271 			tctx.head_wb_addr = txr->tx_paddr +
1272 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1273 		} else {
1274 			tctx.head_wb_ena = 0;
1275 			tctx.head_wb_addr = 0;
1276 		}
1277 		tctx.rdylist_act = 0;
1278 		err = i40e_clear_lan_tx_queue_context(hw, i);
1279 		if (err) {
1280 			device_printf(dev, "Unable to clear TX context\n");
1281 			break;
1282 		}
1283 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1284 		if (err) {
1285 			device_printf(dev, "Unable to set TX context\n");
1286 			break;
1287 		}
1288 		/* Associate the ring with this PF */
1289 		txctl = I40E_QTX_CTL_PF_QUEUE;
1290 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1291 		    I40E_QTX_CTL_PF_INDX_MASK);
1292 		wr32(hw, I40E_QTX_CTL(i), txctl);
1293 		ixl_flush(hw);
1294 
1295 		/* Do ring (re)init */
1296 		ixl_init_tx_ring(vsi, tx_que);
1297 	}
1298 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1299 		struct rx_ring 		*rxr = &rx_que->rxr;
1300 		struct i40e_hmc_obj_rxq rctx;
1301 
1302 		/* Next setup the HMC RX Context  */
1303 		if (scctx->isc_max_frame_size <= MCLBYTES)
1304 			rxr->mbuf_sz = MCLBYTES;
1305 		else
1306 			rxr->mbuf_sz = MJUMPAGESIZE;
1307 
1308 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1309 
1310 		/* Set up an RX context for the HMC */
1311 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
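		/* dbuff is programmed in 128-byte units (the DBUFF shift is 7) */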
1312 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1313 		/* ignore header split for now */
1314 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1315 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1316 		    scctx->isc_max_frame_size : max_rxmax;
1317 		rctx.dtype = 0;
1318 		rctx.dsize = 1;		/* do 32byte descriptors */
1319 		rctx.hsplit_0 = 0;	/* no header split */
1320 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1321 		rctx.qlen = scctx->isc_nrxd[0];
1322 		rctx.tphrdesc_ena = 1;
1323 		rctx.tphwdesc_ena = 1;
1324 		rctx.tphdata_ena = 0;	/* Header Split related */
1325 		rctx.tphhead_ena = 0;	/* Header Split related */
1326 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
1327 		rctx.crcstrip = 1;
1328 		rctx.l2tsel = 1;
1329 		rctx.showiv = 1;	/* Strip inner VLAN header */
1330 		rctx.fc_ena = 0;	/* Disable FCoE */
1331 		rctx.prefena = 1;	/* Prefetch descriptors */
1332 
1333 		err = i40e_clear_lan_rx_queue_context(hw, i);
1334 		if (err) {
1335 			device_printf(dev,
1336 			    "Unable to clear RX context %d\n", i);
1337 			break;
1338 		}
1339 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1340 		if (err) {
1341 			device_printf(dev, "Unable to set RX context %d\n", i);
1342 			break;
1343 		}
1344 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
1345 	}
1346 	return (err);
1347 }
1348 
1349 void
1350 ixl_free_mac_filters(struct ixl_vsi *vsi)
1351 {
1352 	struct ixl_mac_filter *f;
1353 
1354 	while (!SLIST_EMPTY(&vsi->ftl)) {
1355 		f = SLIST_FIRST(&vsi->ftl);
1356 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
1357 		free(f, M_DEVBUF);
1358 	}
1359 }
1360 
1361 /*
1362 ** Provide an update to the queue RX
1363 ** interrupt moderation value.
1364 */
1365 void
1366 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1367 {
1368 	struct ixl_vsi	*vsi = que->vsi;
1369 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1370 	struct i40e_hw	*hw = vsi->hw;
1371 	struct rx_ring	*rxr = &que->rxr;
1372 	u16		rx_itr;
1373 	u16		rx_latency = 0;
1374 	int		rx_bytes;
1375 
1376 	/* Idle, do nothing */
1377 	if (rxr->bytes == 0)
1378 		return;
1379 
1380 	if (pf->dynamic_rx_itr) {
1381 		rx_bytes = rxr->bytes/rxr->itr;
1382 		rx_itr = rxr->itr;
1383 
1384 		/* Adjust latency range */
1385 		switch (rxr->latency) {
1386 		case IXL_LOW_LATENCY:
1387 			if (rx_bytes > 10) {
1388 				rx_latency = IXL_AVE_LATENCY;
1389 				rx_itr = IXL_ITR_20K;
1390 			}
1391 			break;
1392 		case IXL_AVE_LATENCY:
1393 			if (rx_bytes > 20) {
1394 				rx_latency = IXL_BULK_LATENCY;
1395 				rx_itr = IXL_ITR_8K;
1396 			} else if (rx_bytes <= 10) {
1397 				rx_latency = IXL_LOW_LATENCY;
1398 				rx_itr = IXL_ITR_100K;
1399 			}
1400 			break;
1401 		case IXL_BULK_LATENCY:
1402 			if (rx_bytes <= 20) {
1403 				rx_latency = IXL_AVE_LATENCY;
1404 				rx_itr = IXL_ITR_20K;
1405 			}
1406 			break;
1407 		}
1408 
1409 		rxr->latency = rx_latency;
1410 
1411 		if (rx_itr != rxr->itr) {
1412 			/* do an exponential smoothing */
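			/*
			 * new = (10 * measured * current) /
			 *     (9 * measured + current): a weighted harmonic
			 * mean, so the ITR drops quickly toward a lower
			 * measured value but climbs back up only gradually.
			 */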
1413 			rx_itr = (10 * rx_itr * rxr->itr) /
1414 			    ((9 * rx_itr) + rxr->itr);
1415 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
1416 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1417 			    rxr->me), rxr->itr);
1418 		}
1419 	} else { /* We may have toggled to non-dynamic */
1420 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1421 			vsi->rx_itr_setting = pf->rx_itr;
1422 		/* Update the hardware if needed */
1423 		if (rxr->itr != vsi->rx_itr_setting) {
1424 			rxr->itr = vsi->rx_itr_setting;
1425 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1426 			    rxr->me), rxr->itr);
1427 		}
1428 	}
1429 	rxr->bytes = 0;
1430 	rxr->packets = 0;
1431 }
1432 
1433 
1434 /*
1435 ** Provide an update to the queue TX
1436 ** interrupt moderation value.
1437 */
1438 void
1439 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1440 {
1441 	struct ixl_vsi	*vsi = que->vsi;
1442 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1443 	struct i40e_hw	*hw = vsi->hw;
1444 	struct tx_ring	*txr = &que->txr;
1445 	u16		tx_itr;
1446 	u16		tx_latency = 0;
1447 	int		tx_bytes;
1448 
1450 	/* Idle, do nothing */
1451 	if (txr->bytes == 0)
1452 		return;
1453 
1454 	if (pf->dynamic_tx_itr) {
1455 		tx_bytes = txr->bytes/txr->itr;
1456 		tx_itr = txr->itr;
1457 
1458 		switch (txr->latency) {
1459 		case IXL_LOW_LATENCY:
1460 			if (tx_bytes > 10) {
1461 				tx_latency = IXL_AVE_LATENCY;
1462 				tx_itr = IXL_ITR_20K;
1463 			}
1464 			break;
1465 		case IXL_AVE_LATENCY:
1466 			if (tx_bytes > 20) {
1467 				tx_latency = IXL_BULK_LATENCY;
1468 				tx_itr = IXL_ITR_8K;
1469 			} else if (tx_bytes <= 10) {
1470 				tx_latency = IXL_LOW_LATENCY;
1471 				tx_itr = IXL_ITR_100K;
1472 			}
1473 			break;
1474 		case IXL_BULK_LATENCY:
1475 			if (tx_bytes <= 20) {
1476 				tx_latency = IXL_AVE_LATENCY;
1477 				tx_itr = IXL_ITR_20K;
1478 			}
1479 			break;
1480 		}
1481 
1482 		txr->latency = tx_latency;
1483 
1484 		if (tx_itr != txr->itr) {
1485 			/* do an exponential smoothing */
1486 			tx_itr = (10 * tx_itr * txr->itr) /
1487 			    ((9 * tx_itr) + txr->itr);
1488 			txr->itr = min(tx_itr, IXL_MAX_ITR);
1489 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1490 			    txr->me), txr->itr);
1491 		}
1492 
1493 	} else { /* We may have toggled to non-dynamic */
1494 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1495 			vsi->tx_itr_setting = pf->tx_itr;
1496 		/* Update the hardware if needed */
1497 		if (txr->itr != vsi->tx_itr_setting) {
1498 			txr->itr = vsi->tx_itr_setting;
1499 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1500 			    txr->me), txr->itr);
1501 		}
1502 	}
1503 	txr->bytes = 0;
1504 	txr->packets = 0;
1505 	return;
1506 }
1507 
1508 #ifdef IXL_DEBUG
1509 /**
1510  * ixl_sysctl_qtx_tail_handler
1511  * Retrieves I40E_QTX_TAIL value from hardware
1512  * for a sysctl.
1513  */
1514 int
1515 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1516 {
1517 	struct ixl_tx_queue *tx_que;
1518 	int error;
1519 	u32 val;
1520 
1521 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1522 	if (!tx_que) return (0);
1523 
1524 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1525 	error = sysctl_handle_int(oidp, &val, 0, req);
1526 	if (error || !req->newptr)
1527 		return (error);
1528 	return (0);
1529 }
1530 
1531 /**
1532  * ixl_sysctl_qrx_tail_handler
1533  * Retrieves I40E_QRX_TAIL value from hardware
1534  * for a sysctl.
1535  */
1536 int
1537 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1538 {
1539 	struct ixl_rx_queue *rx_que;
1540 	int error;
1541 	u32 val;
1542 
1543 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1544 	if (!rx_que) return (0);
1545 
1546 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1547 	error = sysctl_handle_int(oidp, &val, 0, req);
1548 	if (error || !req->newptr)
1549 		return (error);
1550 	return (0);
1551 }
1552 #endif
1553 
1554 /*
1555  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1556  * Writes to the ITR registers immediately.
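 * (Reachable from userland as a sysctl, typically dev.ixl.<unit>.tx_itr,
 * assuming the default device sysctl tree.)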
1557  */
1558 static int
1559 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1560 {
1561 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1562 	device_t dev = pf->dev;
1563 	int error = 0;
1564 	int requested_tx_itr;
1565 
1566 	requested_tx_itr = pf->tx_itr;
1567 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1568 	if ((error) || (req->newptr == NULL))
1569 		return (error);
1570 	if (pf->dynamic_tx_itr) {
1571 		device_printf(dev,
1572 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1573 		return (EINVAL);
1574 	}
1575 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1576 		device_printf(dev,
1577 		    "Invalid TX itr value; value must be between 0 and %d\n",
1578 		    IXL_MAX_ITR);
1579 		return (EINVAL);
1580 	}
1581 
1582 	pf->tx_itr = requested_tx_itr;
1583 	ixl_configure_tx_itr(pf);
1584 
1585 	return (error);
1586 }
1587 
1588 /*
1589  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1590  * Writes to the ITR registers immediately.
1591  */
1592 static int
1593 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1594 {
1595 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1596 	device_t dev = pf->dev;
1597 	int error = 0;
1598 	int requested_rx_itr;
1599 
1600 	requested_rx_itr = pf->rx_itr;
1601 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1602 	if ((error) || (req->newptr == NULL))
1603 		return (error);
1604 	if (pf->dynamic_rx_itr) {
1605 		device_printf(dev,
1606 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1607 		return (EINVAL);
1608 	}
1609 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1610 		device_printf(dev,
1611 		    "Invalid RX itr value; value must be between 0 and %d\n",
1612 		    IXL_MAX_ITR);
1613 		return (EINVAL);
1614 	}
1615 
1616 	pf->rx_itr = requested_rx_itr;
1617 	ixl_configure_rx_itr(pf);
1618 
1619 	return (error);
1620 }
1621 
1622 void
1623 ixl_add_hw_stats(struct ixl_pf *pf)
1624 {
1625 	struct ixl_vsi *vsi = &pf->vsi;
1626 	device_t dev = iflib_get_dev(vsi->ctx);
1627 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1628 
1629 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1630 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1631 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1632 
1633 	/* Driver statistics */
1634 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1635 			CTLFLAG_RD, &pf->admin_irq,
1636 			"Admin Queue IRQs received");
1637 
1638 	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1639 
1640 	ixl_add_queues_sysctls(dev, vsi);
1641 
1642 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1643 }
1644 
1645 void
1646 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1647 	struct sysctl_oid_list *child,
1648 	struct i40e_hw_port_stats *stats)
1649 {
1650 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1651 				    CTLFLAG_RD, NULL, "MAC Statistics");
1652 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1653 
1654 	struct i40e_eth_stats *eth_stats = &stats->eth;
1655 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1656 
1657 	struct ixl_sysctl_info ctls[] =
1658 	{
1659 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1660 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1661 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1662 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1663 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1664 		/* Packet Reception Stats */
1665 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1666 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1667 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1668 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1669 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1670 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1671 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1672 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1673 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1674 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1675 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1676 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1677 		/* Packet Transmission Stats */
1678 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1679 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1680 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1681 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1682 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1683 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1684 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1685 		/* Flow control */
1686 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1687 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1688 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1689 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1690 		/* End */
1691 		{0,0,0}
1692 	};
1693 
1694 	struct ixl_sysctl_info *entry = ctls;
1695 	while (entry->stat != 0) {
1697 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1698 				CTLFLAG_RD, entry->stat,
1699 				entry->description);
1700 		entry++;
1701 	}
1702 }
1703 
1704 void
1705 ixl_set_rss_key(struct ixl_pf *pf)
1706 {
1707 	struct i40e_hw *hw = &pf->hw;
1708 	struct ixl_vsi *vsi = &pf->vsi;
1709 	device_t	dev = pf->dev;
1710 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1711 	enum i40e_status_code status;
1712 
1713 #ifdef RSS
1714 	/* Fetch the configured RSS key */
1715 	rss_getkey((uint8_t *) &rss_seed);
1716 #else
1717 	ixl_get_default_rss_key(rss_seed);
1718 #endif
1719 	/* Fill out hash function seed */
1720 	if (hw->mac.type == I40E_MAC_X722) {
1721 		struct i40e_aqc_get_set_rss_key_data key_data;
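		/*
		 * The copy below covers IXL_RSS_KEY_SIZE_REG (13) 32-bit
		 * registers worth of key material, i.e. 52 bytes, matching
		 * the standard (40-byte) plus extended (12-byte) key fields
		 * of the admin queue structure.
		 */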
1722 		bcopy(rss_seed, &key_data, sizeof(key_data));
1723 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1724 		if (status)
1725 			device_printf(dev,
1726 			    "i40e_aq_set_rss_key status %s, error %s\n",
1727 			    i40e_stat_str(hw, status),
1728 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1729 	} else {
1730 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1731 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1732 	}
1733 }
1734 
1735 /*
1736  * Configure enabled PCTYPES for RSS.
1737  */
1738 void
1739 ixl_set_rss_pctypes(struct ixl_pf *pf)
1740 {
1741 	struct i40e_hw *hw = &pf->hw;
1742 	u64		set_hena = 0, hena;
1743 
1744 #ifdef RSS
1745 	u32		rss_hash_config;
1746 
1747 	rss_hash_config = rss_gethashconfig();
1748 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1749 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1750 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1751 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1752 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1753 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1754 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1755 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1756 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1757 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1758 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1759 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1760 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1761 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1762 #else
1763 	if (hw->mac.type == I40E_MAC_X722)
1764 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1765 	else
1766 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1767 #endif
1768 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1769 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1770 	hena |= set_hena;
1771 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1772 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1774 }
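
/*
 * HENA is a 64-bit PCTYPE bitmask split across two 32-bit registers:
 * bits 31:0 are written to I40E_PFQF_HENA(0) and bits 63:32 to
 * I40E_PFQF_HENA(1).  A PCTYPE value of 33, for example, corresponds
 * to bit 33 of the mask, i.e. bit 1 of HENA(1).
 */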
1775 
1776 void
1777 ixl_set_rss_hlut(struct ixl_pf *pf)
1778 {
1779 	struct i40e_hw	*hw = &pf->hw;
1780 	struct ixl_vsi *vsi = &pf->vsi;
1781 	device_t	dev = iflib_get_dev(vsi->ctx);
1782 	int		i, que_id;
1783 	int		lut_entry_width;
1784 	u32		lut = 0;
1785 	enum i40e_status_code status;
1786 
1787 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1788 
1789 	/* Populate the LUT with the max number of queues, in round-robin fashion */
1790 	u8 hlut_buf[512];
1791 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1792 #ifdef RSS
1793 		/*
1794 		 * Fetch the RSS bucket id for the given indirection entry.
1795 		 * Cap it at the number of configured buckets (which is
1796 		 * num_queues.)
1797 		 */
1798 		que_id = rss_get_indirection_to_bucket(i);
1799 		que_id = que_id % vsi->num_rx_queues;
1800 #else
1801 		que_id = i % vsi->num_rx_queues;
1802 #endif
1803 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1804 		hlut_buf[i] = lut;
1805 	}
1806 
1807 	if (hw->mac.type == I40E_MAC_X722) {
1808 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1809 		if (status)
1810 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1811 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1812 	} else {
1813 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1814 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1815 		ixl_flush(hw);
1816 	}
1817 }
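
/*
 * Worked example: with a LUT entry width of 7 bits the mask above is
 * (0x1 << 7) - 1 == 0x7F, so with 8 RX queues the table holds the
 * repeating pattern 0, 1, ..., 7.  On X722 the table is pushed through
 * the admin queue; on other MACs it is written four entries at a time
 * via the 32-bit I40E_PFQF_HLUT registers.
 */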
1818 
1819 /*
1820 ** Setup the PF's RSS parameters.
1821 */
1822 void
1823 ixl_config_rss(struct ixl_pf *pf)
1824 {
1825 	ixl_set_rss_key(pf);
1826 	ixl_set_rss_pctypes(pf);
1827 	ixl_set_rss_hlut(pf);
1828 }
1829 
1830 /*
1831 ** This routine updates VLAN filters; called by init,
1832 ** it scans the filter table and then updates the HW
1833 ** after a soft reset.
1834 */
1835 void
1836 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1837 {
1838 	struct ixl_mac_filter	*f;
1839 	int			cnt = 0, flags;
1840 
1841 	if (vsi->num_vlans == 0)
1842 		return;
1843 	/*
1844 	** Scan the filter list for vlan entries,
1845 	** mark them for addition and then call
1846 	** for the AQ update.
1847 	*/
1848 	SLIST_FOREACH(f, &vsi->ftl, next) {
1849 		if (f->flags & IXL_FILTER_VLAN) {
1850 			f->flags |=
1851 			    (IXL_FILTER_ADD |
1852 			    IXL_FILTER_USED);
1853 			cnt++;
1854 		}
1855 	}
1856 	if (cnt == 0) {
1857 		device_printf(vsi->dev, "setup vlan: no filters found!\n");
1858 		return;
1859 	}
1860 	flags = IXL_FILTER_VLAN;
1861 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1862 	ixl_add_hw_filters(vsi, flags, cnt);
1863 }
1864 
1865 /*
1866  * Some firmware versions configure a default MAC/VLAN filter
1867  * that interferes with the filters managed by the driver.
1868  * Make sure it's removed.
1869  */
1870 void
1871 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1872 {
1873 	struct i40e_aqc_remove_macvlan_element_data e;
1874 
1875 	bzero(&e, sizeof(e));
1876 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1877 	e.vlan_tag = 0;
1878 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1879 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1880 
1881 	bzero(&e, sizeof(e));
1882 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1883 	e.vlan_tag = 0;
1884 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1885 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1886 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1887 }
1888 
1889 /*
1890 ** Initialize filter list and add filters that the hardware
1891 ** needs to know about.
1892 **
1893 ** Requires VSI's filter list & seid to be set before calling.
1894 */
1895 void
1896 ixl_init_filters(struct ixl_vsi *vsi)
1897 {
1898 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1899 
1900 	/* Initialize mac filter list for VSI */
1901 	SLIST_INIT(&vsi->ftl);
1902 
1903 	/* Receive broadcast Ethernet frames */
1904 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1905 
1906 	ixl_del_default_hw_filters(vsi);
1907 
1908 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1909 	/*
1910 	 * Prevent Tx flow control frames from being sent out by
1911 	 * non-firmware transmitters.
1912 	 * This affects every VSI in the PF.
1913 	 */
1914 	if (pf->enable_tx_fc_filter)
1915 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1916 }
1917 
1918 /*
1919 ** This routine adds multicast filters
1920 */
1921 void
1922 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1923 {
1924 	struct ixl_mac_filter *f;
1925 
1926 	/* Does one already exist? */
1927 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1928 	if (f != NULL)
1929 		return;
1930 
1931 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1932 	if (f != NULL)
1933 		f->flags |= IXL_FILTER_MC;
1934 	else
1935 		device_printf(vsi->dev, "WARNING: no filter available!!\n");
1936 }
1937 
1938 void
1939 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1940 {
1941 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1942 }
1943 
1944 /*
1945  * This routine adds a MAC/VLAN filter to the software filter
1946  * list, then adds that new filter to the HW if it doesn't already
1947  * exist in the SW filter list.
1948  */
1949 void
1950 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1951 {
1952 	struct ixl_mac_filter	*f, *tmp;
1953 	struct ixl_pf		*pf;
1954 	device_t		dev;
1955 
1956 	DEBUGOUT("ixl_add_filter: begin");
1957 
1958 	pf = vsi->back;
1959 	dev = pf->dev;
1960 
1961 	/* Does one already exist? */
1962 	f = ixl_find_filter(vsi, macaddr, vlan);
1963 	if (f != NULL)
1964 		return;
1965 	/*
1966 	** If this is the first VLAN being registered, we
1967 	** need to remove the ANY filter that indicates we are
1968 	** not in a VLAN, and replace it with a VLAN-0 filter.
1969 	*/
1970 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1971 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1972 		if (tmp != NULL) {
1973 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1974 			ixl_add_filter(vsi, macaddr, 0);
1975 		}
1976 	}
1977 
1978 	f = ixl_new_filter(vsi, macaddr, vlan);
1979 	if (f == NULL) {
1980 		device_printf(dev, "WARNING: no filter available!!\n");
1981 		return;
1982 	}
1983 	if (f->vlan != IXL_VLAN_ANY)
1984 		f->flags |= IXL_FILTER_VLAN;
1985 	else
1986 		vsi->num_macs++;
1987 
1988 	f->flags |= IXL_FILTER_USED;
1989 	ixl_add_hw_filters(vsi, f->flags, 1);
1990 }
1991 
1992 void
1993 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1994 {
1995 	struct ixl_mac_filter *f;
1996 
1997 	f = ixl_find_filter(vsi, macaddr, vlan);
1998 	if (f == NULL)
1999 		return;
2000 
2001 	f->flags |= IXL_FILTER_DEL;
2002 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2003 		vsi->num_macs--;
2004 	ixl_del_hw_filters(vsi, 1);	/* may free 'f'; don't touch it after this */
2005 
2006 	/* Check if this is the last vlan removal */
2007 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2008 		/* Switch back to a non-vlan filter */
2009 		ixl_del_filter(vsi, macaddr, 0);
2010 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2011 	}
2012 	return;
2013 }
2014 
2015 /*
2016 ** Find the filter with both matching mac addr and vlan id
2017 */
2018 struct ixl_mac_filter *
2019 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2020 {
2021 	struct ixl_mac_filter	*f;
2022 
2023 	SLIST_FOREACH(f, &vsi->ftl, next) {
2024 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2025 		    && (f->vlan == vlan)) {
2026 			return (f);
2027 		}
2028 	}
2029 
2030 	return (NULL);
2031 }
2032 
2033 /*
2034 ** This routine takes additions to the vsi filter
2035 ** table and creates an Admin Queue call to create
2036 ** the filters in the hardware.
2037 */
2038 void
2039 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2040 {
2041 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2042 	struct ixl_mac_filter	*f;
2043 	struct ixl_pf		*pf;
2044 	struct i40e_hw		*hw;
2045 	device_t		dev;
2046 	enum i40e_status_code	status;
2047 	int			j = 0;
2048 
2049 	pf = vsi->back;
2050 	dev = vsi->dev;
2051 	hw = &pf->hw;
2052 
2053 	if (cnt < 1) {
2054 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt < 1\n");
2055 		return;
2056 	}
2057 
2058 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2059 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2060 	if (a == NULL) {
2061 		device_printf(dev, "add_hw_filters failed to get memory\n");
2062 		return;
2063 	}
2064 
2065 	/*
2066 	** Scan the filter list; each time we find a matching
2067 	** filter, add it to the admin queue array and turn off
2068 	** the add bit.
2069 	*/
2070 	SLIST_FOREACH(f, &vsi->ftl, next) {
2071 		if ((f->flags & flags) == flags) {
2072 			b = &a[j]; /* a pox on fvl long names :) */
2073 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2074 			if (f->vlan == IXL_VLAN_ANY) {
2075 				b->vlan_tag = 0;
2076 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2077 			} else {
2078 				b->vlan_tag = f->vlan;
2079 				b->flags = 0;
2080 			}
2081 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2082 			f->flags &= ~IXL_FILTER_ADD;
2083 			j++;
2084 
2085 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2086 			    MAC_FORMAT_ARGS(f->macaddr));
2087 		}
2088 		if (j == cnt)
2089 			break;
2090 	}
2091 	if (j > 0) {
2092 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2093 		if (status)
2094 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2095 			    "error %s\n", i40e_stat_str(hw, status),
2096 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2097 		else
2098 			vsi->hw_filters_add += j;
2099 	}
2100 	free(a, M_DEVBUF);
2101 	return;
2102 }
2103 
2104 /*
2105 ** This routine takes removals in the vsi filter
2106 ** table and creates an Admin Queue call to delete
2107 ** the filters in the hardware.
2108 */
2109 void
2110 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2111 {
2112 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2113 	struct ixl_pf		*pf;
2114 	struct i40e_hw		*hw;
2115 	device_t		dev;
2116 	struct ixl_mac_filter	*f, *f_temp;
2117 	enum i40e_status_code	status;
2118 	int			j = 0;
2119 
2120 	pf = vsi->back;
2121 	hw = &pf->hw;
2122 	dev = vsi->dev;
2123 
2124 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2125 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2126 	if (d == NULL) {
2127 		device_printf(dev, "%s: failed to get memory\n", __func__);
2128 		return;
2129 	}
2130 
2131 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2132 		if (f->flags & IXL_FILTER_DEL) {
2133 			e = &d[j]; /* a pox on fvl long names :) */
2134 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2135 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2136 			if (f->vlan == IXL_VLAN_ANY) {
2137 				e->vlan_tag = 0;
2138 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2139 			} else {
2140 				e->vlan_tag = f->vlan;
2141 			}
2142 
2143 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2144 			    MAC_FORMAT_ARGS(f->macaddr));
2145 
2146 			/* delete entry from vsi list */
2147 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2148 			free(f, M_DEVBUF);
2149 			j++;
2150 		}
2151 		if (j == cnt)
2152 			break;
2153 	}
2154 	if (j > 0) {
2155 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2156 		if (status) {
2157 			int sc = 0;
2158 			for (int i = 0; i < j; i++)
2159 				sc += (!d[i].error_code);
2160 			vsi->hw_filters_del += sc;
2161 			device_printf(dev,
2162 			    "Failed to remove %d/%d filters, error %s\n",
2163 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2164 		} else
2165 			vsi->hw_filters_del += j;
2166 	}
2167 	free(d, M_DEVBUF);
2168 	return;
2169 }
2170 
2171 int
2172 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2173 {
2174 	struct i40e_hw	*hw = &pf->hw;
2175 	int		error = 0;
2176 	u32		reg;
2177 	u16		pf_qidx;
2178 
2179 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2180 
2181 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2182 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2183 	    pf_qidx, vsi_qidx);
2184 
2185 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2186 
2187 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2188 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2189 	    I40E_QTX_ENA_QENA_STAT_MASK;
2190 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2191 	/* Verify the enable took */
2192 	for (int j = 0; j < 10; j++) {
2193 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2194 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2195 			break;
2196 		i40e_usec_delay(10);
2197 	}
2198 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2199 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2200 		    pf_qidx);
2201 		error = ETIMEDOUT;
2202 	}
2203 
2204 	return (error);
2205 }
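
/*
 * Note: the verification loop above polls up to 10 times with a 10 us
 * delay, so the enable is given roughly 100 us (plus register access
 * latency) to take effect before ETIMEDOUT is returned.
 */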
2206 
2207 int
2208 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2209 {
2210 	struct i40e_hw	*hw = &pf->hw;
2211 	int		error = 0;
2212 	u32		reg;
2213 	u16		pf_qidx;
2214 
2215 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2216 
2217 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2218 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2219 	    pf_qidx, vsi_qidx);
2220 
2221 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2222 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2223 	    I40E_QRX_ENA_QENA_STAT_MASK;
2224 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2225 	/* Verify the enable took */
2226 	for (int j = 0; j < 10; j++) {
2227 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2228 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2229 			break;
2230 		i40e_usec_delay(10);
2231 	}
2232 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2233 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2234 		    pf_qidx);
2235 		error = ETIMEDOUT;
2236 	}
2237 
2238 	return (error);
2239 }
2240 
2241 int
2242 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2243 {
2244 	int error = 0;
2245 
2246 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2247 	/* Called function already prints error message */
2248 	if (error)
2249 		return (error);
2250 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2251 	return (error);
2252 }
2253 
2254 /* For PF VSI only */
2255 int
2256 ixl_enable_rings(struct ixl_vsi *vsi)
2257 {
2258 	struct ixl_pf	*pf = vsi->back;
2259 	int		error = 0;
2260 
2261 	for (int i = 0; i < vsi->num_tx_queues; i++)
2262 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2263 
2264 	for (int i = 0; i < vsi->num_rx_queues; i++)
2265 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2266 
2267 	return (error);
2268 }
2269 
2270 /*
2271  * Returns ETIMEDOUT if the TX ring is still enabled after the timeout.
2272  */
2273 int
2274 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2275 {
2276 	struct i40e_hw	*hw = &pf->hw;
2277 	int		error = 0;
2278 	u32		reg;
2279 	u16		pf_qidx;
2280 
2281 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2282 
2283 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2284 	i40e_usec_delay(500);
2285 
2286 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2287 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2288 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2289 	/* Verify the disable took */
2290 	for (int j = 0; j < 10; j++) {
2291 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2292 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2293 			break;
2294 		i40e_msec_delay(10);
2295 	}
2296 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2297 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2298 		    pf_qidx);
2299 		error = ETIMEDOUT;
2300 	}
2301 
2302 	return (error);
2303 }
2304 
2305 /*
2306  * Returns ETIMEDOUT if the RX ring is still enabled after the timeout.
2307  */
2308 int
2309 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2310 {
2311 	struct i40e_hw	*hw = &pf->hw;
2312 	int		error = 0;
2313 	u32		reg;
2314 	u16		pf_qidx;
2315 
2316 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2317 
2318 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2319 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2320 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2321 	/* Verify the disable took */
2322 	for (int j = 0; j < 10; j++) {
2323 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2324 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2325 			break;
2326 		i40e_msec_delay(10);
2327 	}
2328 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2329 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2330 		    pf_qidx);
2331 		error = ETIMEDOUT;
2332 	}
2333 
2334 	return (error);
2335 }
2336 
2337 int
2338 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2339 {
2340 	int error = 0;
2341 
2342 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2343 	/* Called function already prints error message */
2344 	if (error)
2345 		return (error);
2346 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2347 	return (error);
2348 }
2349 
2350 int
2351 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2352 {
2353 	int error = 0;
2354 
2355 	for (int i = 0; i < vsi->num_tx_queues; i++)
2356 		error = ixl_disable_tx_ring(pf, qtag, i);
2357 
2358 	for (int i = 0; i < vsi->num_rx_queues; i++)
2359 		error = ixl_disable_rx_ring(pf, qtag, i);
2360 
2361 	return (error);
2362 }
2363 
2364 static void
2365 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2366 {
2367 	struct i40e_hw *hw = &pf->hw;
2368 	device_t dev = pf->dev;
2369 	struct ixl_vf *vf;
2370 	bool mdd_detected = false;
2371 	bool pf_mdd_detected = false;
2372 	bool vf_mdd_detected = false;
2373 	u16 vf_num, queue;
2374 	u8 pf_num, event;
2375 	u8 pf_mdet_num, vp_mdet_num;
2376 	u32 reg;
2377 
2378 	/* find what triggered the MDD event */
2379 	reg = rd32(hw, I40E_GL_MDET_TX);
2380 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2381 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2382 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2383 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2384 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2385 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2386 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2387 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2388 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2389 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2390 		mdd_detected = true;
2391 	}
2392 
2393 	if (!mdd_detected)
2394 		return;
2395 
2396 	reg = rd32(hw, I40E_PF_MDET_TX);
2397 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2398 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2399 		pf_mdet_num = hw->pf_id;
2400 		pf_mdd_detected = true;
2401 	}
2402 
2403 	/* Check if MDD was caused by a VF */
2404 	for (int i = 0; i < pf->num_vfs; i++) {
2405 		vf = &(pf->vfs[i]);
2406 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2407 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2408 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2409 			vp_mdet_num = i;
2410 			vf->num_mdd_events++;
2411 			vf_mdd_detected = true;
2412 		}
2413 	}
2414 
2415 	/* Print out an error message */
2416 	if (vf_mdd_detected && pf_mdd_detected)
2417 		device_printf(dev,
2418 		    "Malicious Driver Detection event %d"
2419 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2420 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2421 	else if (vf_mdd_detected && !pf_mdd_detected)
2422 		device_printf(dev,
2423 		    "Malicious Driver Detection event %d"
2424 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2425 		    event, queue, pf_num, vf_num, vp_mdet_num);
2426 	else if (!vf_mdd_detected && pf_mdd_detected)
2427 		device_printf(dev,
2428 		    "Malicious Driver Detection event %d"
2429 		    " on TX queue %d, pf number %d (PF-%d)\n",
2430 		    event, queue, pf_num, pf_mdet_num);
2431 	/* Theoretically shouldn't happen */
2432 	else
2433 		device_printf(dev,
2434 		    "TX Malicious Driver Detection event (unknown)\n");
2435 }
2436 
2437 static void
2438 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2439 {
2440 	struct i40e_hw *hw = &pf->hw;
2441 	device_t dev = pf->dev;
2442 	struct ixl_vf *vf;
2443 	bool mdd_detected = false;
2444 	bool pf_mdd_detected = false;
2445 	bool vf_mdd_detected = false;
2446 	u16 queue;
2447 	u8 pf_num, event;
2448 	u8 pf_mdet_num, vp_mdet_num;
2449 	u32 reg;
2450 
2451 	/*
2452 	 * GL_MDET_RX doesn't contain VF number information, unlike
2453 	 * GL_MDET_TX.
2454 	 */
2455 	reg = rd32(hw, I40E_GL_MDET_RX);
2456 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2457 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2458 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2459 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2460 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2461 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2462 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2463 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2464 		mdd_detected = true;
2465 	}
2466 
2467 	if (!mdd_detected)
2468 		return;
2469 
2470 	reg = rd32(hw, I40E_PF_MDET_RX);
2471 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2472 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2473 		pf_mdet_num = hw->pf_id;
2474 		pf_mdd_detected = true;
2475 	}
2476 
2477 	/* Check if MDD was caused by a VF */
2478 	for (int i = 0; i < pf->num_vfs; i++) {
2479 		vf = &(pf->vfs[i]);
2480 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2481 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2482 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2483 			vp_mdet_num = i;
2484 			vf->num_mdd_events++;
2485 			vf_mdd_detected = true;
2486 		}
2487 	}
2488 
2489 	/* Print out an error message */
2490 	if (vf_mdd_detected && pf_mdd_detected)
2491 		device_printf(dev,
2492 		    "Malicious Driver Detection event %d"
2493 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2494 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2495 	else if (vf_mdd_detected && !pf_mdd_detected)
2496 		device_printf(dev,
2497 		    "Malicious Driver Detection event %d"
2498 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2499 		    event, queue, pf_num, vp_mdet_num);
2500 	else if (!vf_mdd_detected && pf_mdd_detected)
2501 		device_printf(dev,
2502 		    "Malicious Driver Detection event %d"
2503 		    " on RX queue %d, pf number %d (PF-%d)\n",
2504 		    event, queue, pf_num, pf_mdet_num);
2505 	/* Theoretically shouldn't happen */
2506 	else
2507 		device_printf(dev,
2508 		    "RX Malicious Driver Detection event (unknown)\n");
2509 }
2510 
2511 /**
2512  * ixl_handle_mdd_event
2513  *
2514  * Called from the interrupt handler to identify possibly malicious VFs
2515  * (but it also detects events from the PF)
2516  **/
2517 void
2518 ixl_handle_mdd_event(struct ixl_pf *pf)
2519 {
2520 	struct i40e_hw *hw = &pf->hw;
2521 	u32 reg;
2522 
2523 	/*
2524 	 * Handle both TX/RX because it's possible they could
2525 	 * both trigger in the same interrupt.
2526 	 */
2527 	ixl_handle_tx_mdd_event(pf);
2528 	ixl_handle_rx_mdd_event(pf);
2529 
2530 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2531 
2532 	/* re-enable mdd interrupt cause */
2533 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2534 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2535 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2536 	ixl_flush(hw);
2537 }
2538 
2539 void
2540 ixl_enable_intr(struct ixl_vsi *vsi)
2541 {
2542 	struct i40e_hw		*hw = vsi->hw;
2543 	struct ixl_rx_queue	*que = vsi->rx_queues;
2544 
2545 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2546 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2547 			ixl_enable_queue(hw, que->rxr.me);
2548 	} else
2549 		ixl_enable_intr0(hw);
2550 }
2551 
2552 void
2553 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2554 {
2555 	struct i40e_hw		*hw = vsi->hw;
2556 	struct ixl_rx_queue	*que = vsi->rx_queues;
2557 
2558 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2559 		ixl_disable_queue(hw, que->rxr.me);
2560 }
2561 
2562 void
2563 ixl_enable_intr0(struct i40e_hw *hw)
2564 {
2565 	u32		reg;
2566 
2567 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2568 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2569 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2570 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2571 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2572 }
2573 
2574 void
2575 ixl_disable_intr0(struct i40e_hw *hw)
2576 {
2577 	u32		reg;
2578 
2579 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2580 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2581 	ixl_flush(hw);
2582 }
2583 
2584 void
2585 ixl_enable_queue(struct i40e_hw *hw, int id)
2586 {
2587 	u32		reg;
2588 
2589 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2590 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2591 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2592 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2593 }
2594 
2595 void
2596 ixl_disable_queue(struct i40e_hw *hw, int id)
2597 {
2598 	u32		reg;
2599 
2600 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2601 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2602 }
2603 
2604 void
2605 ixl_update_stats_counters(struct ixl_pf *pf)
2606 {
2607 	struct i40e_hw	*hw = &pf->hw;
2608 	struct ixl_vsi	*vsi = &pf->vsi;
2609 	struct ixl_vf	*vf;
2610 
2611 	struct i40e_hw_port_stats *nsd = &pf->stats;
2612 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2613 
2614 	/* Update hw stats */
2615 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2616 			   pf->stat_offsets_loaded,
2617 			   &osd->crc_errors, &nsd->crc_errors);
2618 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2619 			   pf->stat_offsets_loaded,
2620 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2621 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2622 			   I40E_GLPRT_GORCL(hw->port),
2623 			   pf->stat_offsets_loaded,
2624 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2625 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2626 			   I40E_GLPRT_GOTCL(hw->port),
2627 			   pf->stat_offsets_loaded,
2628 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2629 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2630 			   pf->stat_offsets_loaded,
2631 			   &osd->eth.rx_discards,
2632 			   &nsd->eth.rx_discards);
2633 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2634 			   I40E_GLPRT_UPRCL(hw->port),
2635 			   pf->stat_offsets_loaded,
2636 			   &osd->eth.rx_unicast,
2637 			   &nsd->eth.rx_unicast);
2638 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2639 			   I40E_GLPRT_UPTCL(hw->port),
2640 			   pf->stat_offsets_loaded,
2641 			   &osd->eth.tx_unicast,
2642 			   &nsd->eth.tx_unicast);
2643 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2644 			   I40E_GLPRT_MPRCL(hw->port),
2645 			   pf->stat_offsets_loaded,
2646 			   &osd->eth.rx_multicast,
2647 			   &nsd->eth.rx_multicast);
2648 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2649 			   I40E_GLPRT_MPTCL(hw->port),
2650 			   pf->stat_offsets_loaded,
2651 			   &osd->eth.tx_multicast,
2652 			   &nsd->eth.tx_multicast);
2653 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2654 			   I40E_GLPRT_BPRCL(hw->port),
2655 			   pf->stat_offsets_loaded,
2656 			   &osd->eth.rx_broadcast,
2657 			   &nsd->eth.rx_broadcast);
2658 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2659 			   I40E_GLPRT_BPTCL(hw->port),
2660 			   pf->stat_offsets_loaded,
2661 			   &osd->eth.tx_broadcast,
2662 			   &nsd->eth.tx_broadcast);
2663 
2664 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2665 			   pf->stat_offsets_loaded,
2666 			   &osd->tx_dropped_link_down,
2667 			   &nsd->tx_dropped_link_down);
2668 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2669 			   pf->stat_offsets_loaded,
2670 			   &osd->mac_local_faults,
2671 			   &nsd->mac_local_faults);
2672 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2673 			   pf->stat_offsets_loaded,
2674 			   &osd->mac_remote_faults,
2675 			   &nsd->mac_remote_faults);
2676 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2677 			   pf->stat_offsets_loaded,
2678 			   &osd->rx_length_errors,
2679 			   &nsd->rx_length_errors);
2680 
2681 	/* Flow control (LFC) stats */
2682 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2683 			   pf->stat_offsets_loaded,
2684 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2685 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2686 			   pf->stat_offsets_loaded,
2687 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2688 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2689 			   pf->stat_offsets_loaded,
2690 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2691 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2692 			   pf->stat_offsets_loaded,
2693 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2694 
2695 	/* Packet size stats rx */
2696 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2697 			   I40E_GLPRT_PRC64L(hw->port),
2698 			   pf->stat_offsets_loaded,
2699 			   &osd->rx_size_64, &nsd->rx_size_64);
2700 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2701 			   I40E_GLPRT_PRC127L(hw->port),
2702 			   pf->stat_offsets_loaded,
2703 			   &osd->rx_size_127, &nsd->rx_size_127);
2704 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2705 			   I40E_GLPRT_PRC255L(hw->port),
2706 			   pf->stat_offsets_loaded,
2707 			   &osd->rx_size_255, &nsd->rx_size_255);
2708 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2709 			   I40E_GLPRT_PRC511L(hw->port),
2710 			   pf->stat_offsets_loaded,
2711 			   &osd->rx_size_511, &nsd->rx_size_511);
2712 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2713 			   I40E_GLPRT_PRC1023L(hw->port),
2714 			   pf->stat_offsets_loaded,
2715 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2716 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2717 			   I40E_GLPRT_PRC1522L(hw->port),
2718 			   pf->stat_offsets_loaded,
2719 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2720 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2721 			   I40E_GLPRT_PRC9522L(hw->port),
2722 			   pf->stat_offsets_loaded,
2723 			   &osd->rx_size_big, &nsd->rx_size_big);
2724 
2725 	/* Packet size stats tx */
2726 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2727 			   I40E_GLPRT_PTC64L(hw->port),
2728 			   pf->stat_offsets_loaded,
2729 			   &osd->tx_size_64, &nsd->tx_size_64);
2730 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2731 			   I40E_GLPRT_PTC127L(hw->port),
2732 			   pf->stat_offsets_loaded,
2733 			   &osd->tx_size_127, &nsd->tx_size_127);
2734 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2735 			   I40E_GLPRT_PTC255L(hw->port),
2736 			   pf->stat_offsets_loaded,
2737 			   &osd->tx_size_255, &nsd->tx_size_255);
2738 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2739 			   I40E_GLPRT_PTC511L(hw->port),
2740 			   pf->stat_offsets_loaded,
2741 			   &osd->tx_size_511, &nsd->tx_size_511);
2742 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2743 			   I40E_GLPRT_PTC1023L(hw->port),
2744 			   pf->stat_offsets_loaded,
2745 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2746 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2747 			   I40E_GLPRT_PTC1522L(hw->port),
2748 			   pf->stat_offsets_loaded,
2749 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2750 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2751 			   I40E_GLPRT_PTC9522L(hw->port),
2752 			   pf->stat_offsets_loaded,
2753 			   &osd->tx_size_big, &nsd->tx_size_big);
2754 
2755 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2756 			   pf->stat_offsets_loaded,
2757 			   &osd->rx_undersize, &nsd->rx_undersize);
2758 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2759 			   pf->stat_offsets_loaded,
2760 			   &osd->rx_fragments, &nsd->rx_fragments);
2761 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2762 			   pf->stat_offsets_loaded,
2763 			   &osd->rx_oversize, &nsd->rx_oversize);
2764 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2765 			   pf->stat_offsets_loaded,
2766 			   &osd->rx_jabber, &nsd->rx_jabber);
2767 	pf->stat_offsets_loaded = true;
2768 	/* End hw stats */
2769 
2770 	/* Update vsi stats */
2771 	ixl_update_vsi_stats(vsi);
2772 
2773 	for (int i = 0; i < pf->num_vfs; i++) {
2774 		vf = &pf->vfs[i];
2775 		if (vf->vf_flags & VF_FLAG_ENABLED)
2776 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2777 	}
2778 }
2779 
2780 int
2781 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2782 {
2783 	struct i40e_hw *hw = &pf->hw;
2784 	device_t dev = pf->dev;
2785 	int error = 0;
2786 
2787 	error = i40e_shutdown_lan_hmc(hw);
2788 	if (error)
2789 		device_printf(dev,
2790 		    "Shutdown LAN HMC failed with code %d\n", error);
2791 
2792 	ixl_disable_intr0(hw);
2793 
2794 	error = i40e_shutdown_adminq(hw);
2795 	if (error)
2796 		device_printf(dev,
2797 		    "Shutdown Admin queue failed with code %d\n", error);
2798 
2799 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2800 	return (error);
2801 }
2802 
2803 int
2804 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2805 {
2806 	struct i40e_hw *hw = &pf->hw;
2807 	struct ixl_vsi *vsi = &pf->vsi;
2808 	device_t dev = pf->dev;
2809 	int error = 0;
2810 
2811 	device_printf(dev, "Rebuilding driver state...\n");
2812 
2813 	error = i40e_pf_reset(hw);
2814 	if (error) {
2815 		device_printf(dev, "PF reset failure %s\n",
2816 		    i40e_stat_str(hw, error));
2817 		goto ixl_rebuild_hw_structs_after_reset_err;
2818 	}
2819 
2820 	/* Setup */
2821 	error = i40e_init_adminq(hw);
2822 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2823 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2824 		    error);
2825 		goto ixl_rebuild_hw_structs_after_reset_err;
2826 	}
2827 
2828 	i40e_clear_pxe_mode(hw);
2829 
2830 	error = ixl_get_hw_capabilities(pf);
2831 	if (error) {
2832 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2833 		goto ixl_rebuild_hw_structs_after_reset_err;
2834 	}
2835 
2836 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2837 	    hw->func_caps.num_rx_qp, 0, 0);
2838 	if (error) {
2839 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2840 		goto ixl_rebuild_hw_structs_after_reset_err;
2841 	}
2842 
2843 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2844 	if (error) {
2845 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2846 		goto ixl_rebuild_hw_structs_after_reset_err;
2847 	}
2848 
2849 	/* reserve a contiguous allocation for the PF's VSI */
2850 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2851 	if (error) {
2852 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2853 		    error);
2854 		/* TODO: error handling */
2855 	}
2856 
2857 	error = ixl_switch_config(pf);
2858 	if (error) {
2859 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2860 		     error);
2861 		error = EIO;
2862 		goto ixl_rebuild_hw_structs_after_reset_err;
2863 	}
2864 
2865 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2866 	    NULL);
2867 	if (error) {
2868 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2869 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2870 		error = EIO;
2871 		goto ixl_rebuild_hw_structs_after_reset_err;
2872 	}
2873 
2874 	u8 set_fc_err_mask;
2875 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2876 	if (error) {
2877 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2878 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2879 		error = EIO;
2880 		goto ixl_rebuild_hw_structs_after_reset_err;
2881 	}
2882 
2883 	/* Remove default filters reinstalled by FW on reset */
2884 	ixl_del_default_hw_filters(vsi);
2885 
2886 	/* Determine link state */
2887 	if (ixl_attach_get_link_status(pf)) {
2888 		error = EINVAL;
2889 		/* TODO: error handling */
2890 	}
2891 
2892 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2893 	ixl_get_fw_lldp_status(pf);
2894 
2895 	/* Keep admin queue interrupts active while driver is loaded */
2896 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2897 		ixl_configure_intr0_msix(pf);
2898 		ixl_enable_intr0(hw);
2899 	}
2900 
2901 	device_printf(dev, "Rebuilding driver state done.\n");
2902 	return (0);
2903 
2904 ixl_rebuild_hw_structs_after_reset_err:
2905 	device_printf(dev, "Reload the driver to recover\n");
2906 	return (error);
2907 }
2908 
2909 void
2910 ixl_handle_empr_reset(struct ixl_pf *pf)
2911 {
2912 	struct ixl_vsi	*vsi = &pf->vsi;
2913 	struct i40e_hw	*hw = &pf->hw;
2914 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2915 	int count = 0;
2916 	u32 reg;
2917 
2918 	ixl_prepare_for_reset(pf, is_up);
2919 
2920 	/* Typically finishes within 3-4 seconds */
2921 	while (count++ < 100) {
2922 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2923 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2924 		if (reg)
2925 			i40e_msec_delay(100);
2926 		else
2927 			break;
2928 	}
2929 	ixl_dbg(pf, IXL_DBG_INFO,
2930 			"Reset wait count: %d\n", count);
2931 
2932 	ixl_rebuild_hw_structs_after_reset(pf);
2933 
2934 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2935 }
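
/*
 * The wait loop above polls up to 100 times with a 100 ms delay, so an
 * EMP reset is given roughly 10 seconds to complete before the driver
 * proceeds to rebuild its state regardless.
 */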
2936 
2937 /**
2938  * Update VSI-specific ethernet statistics counters.
2939  **/
2940 void
2941 ixl_update_eth_stats(struct ixl_vsi *vsi)
2942 {
2943 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2944 	struct i40e_hw *hw = &pf->hw;
2945 	struct i40e_eth_stats *es;
2946 	struct i40e_eth_stats *oes;
2947 	struct i40e_hw_port_stats *nsd;
2948 	u16 stat_idx = vsi->info.stat_counter_idx;
2949 
2950 	es = &vsi->eth_stats;
2951 	oes = &vsi->eth_stats_offsets;
2952 	nsd = &pf->stats;
2953 
2954 	/* Gather up the stats that the hw collects */
2955 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2956 			   vsi->stat_offsets_loaded,
2957 			   &oes->tx_errors, &es->tx_errors);
2958 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2959 			   vsi->stat_offsets_loaded,
2960 			   &oes->rx_discards, &es->rx_discards);
2961 
2962 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2963 			   I40E_GLV_GORCL(stat_idx),
2964 			   vsi->stat_offsets_loaded,
2965 			   &oes->rx_bytes, &es->rx_bytes);
2966 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2967 			   I40E_GLV_UPRCL(stat_idx),
2968 			   vsi->stat_offsets_loaded,
2969 			   &oes->rx_unicast, &es->rx_unicast);
2970 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2971 			   I40E_GLV_MPRCL(stat_idx),
2972 			   vsi->stat_offsets_loaded,
2973 			   &oes->rx_multicast, &es->rx_multicast);
2974 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2975 			   I40E_GLV_BPRCL(stat_idx),
2976 			   vsi->stat_offsets_loaded,
2977 			   &oes->rx_broadcast, &es->rx_broadcast);
2978 
2979 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2980 			   I40E_GLV_GOTCL(stat_idx),
2981 			   vsi->stat_offsets_loaded,
2982 			   &oes->tx_bytes, &es->tx_bytes);
2983 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2984 			   I40E_GLV_UPTCL(stat_idx),
2985 			   vsi->stat_offsets_loaded,
2986 			   &oes->tx_unicast, &es->tx_unicast);
2987 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2988 			   I40E_GLV_MPTCL(stat_idx),
2989 			   vsi->stat_offsets_loaded,
2990 			   &oes->tx_multicast, &es->tx_multicast);
2991 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2992 			   I40E_GLV_BPTCL(stat_idx),
2993 			   vsi->stat_offsets_loaded,
2994 			   &oes->tx_broadcast, &es->tx_broadcast);
2995 	vsi->stat_offsets_loaded = true;
2996 }
2997 
2998 void
2999 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3000 {
3001 	struct ixl_pf		*pf;
3002 	struct ifnet		*ifp;
3003 	struct i40e_eth_stats	*es;
3004 	u64			tx_discards;
3005 
3006 	struct i40e_hw_port_stats *nsd;
3007 
3008 	pf = vsi->back;
3009 	ifp = vsi->ifp;
3010 	es = &vsi->eth_stats;
3011 	nsd = &pf->stats;
3012 
3013 	ixl_update_eth_stats(vsi);
3014 
3015 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3016 
3017 	/* Update ifnet stats */
3018 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3019 	                   es->rx_multicast +
3020 			   es->rx_broadcast);
3021 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3022 	                   es->tx_multicast +
3023 			   es->tx_broadcast);
3024 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3025 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3026 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3027 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3028 
3029 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3030 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3031 	    nsd->rx_jabber);
3032 	IXL_SET_OERRORS(vsi, es->tx_errors);
3033 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3034 	IXL_SET_OQDROPS(vsi, tx_discards);
3035 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3036 	IXL_SET_COLLISIONS(vsi, 0);
3037 }
3038 
3039 /**
3040  * Reset all of the stats for the given pf
3041  **/
3042 void
3043 ixl_pf_reset_stats(struct ixl_pf *pf)
3044 {
3045 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3046 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3047 	pf->stat_offsets_loaded = false;
3048 }
3049 
3050 /**
3051  * Resets all stats of the given vsi
3052  **/
3053 void
3054 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3055 {
3056 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3057 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3058 	vsi->stat_offsets_loaded = false;
3059 }
3060 
3061 /**
3062  * Read and update a 48 bit stat from the hw
3063  *
3064  * Since the device stats are not reset at PFReset, they likely will not
3065  * be zeroed when the driver starts.  We'll save the first values read
3066  * and use them as offsets to be subtracted from the raw values in order
3067  * to report stats that count from zero.
3068  **/
3069 void
3070 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3071 	bool offset_loaded, u64 *offset, u64 *stat)
3072 {
3073 	u64 new_data;
3074 
3075 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3076 	new_data = rd64(hw, loreg);
3077 #else
3078 	/*
3079 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3080 	 * 10 (and non-amd64 platforms) don't support 64-bit bus reads/writes.
3081 	 */
3082 	new_data = rd32(hw, loreg);
3083 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3084 #endif
3085 
3086 	if (!offset_loaded)
3087 		*offset = new_data;
3088 	if (new_data >= *offset)
3089 		*stat = new_data - *offset;
3090 	else
3091 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3092 	*stat &= 0xFFFFFFFFFFFFULL;
3093 }
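
/*
 * Worked rollover example: with *offset == 0xFFFFFFFFFFF0 and a new
 * reading of 0x10, the 48-bit counter has wrapped, so the stat becomes
 * (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 == 0x20.
 */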
3094 
3095 /**
3096  * Read and update a 32 bit stat from the hw
3097  **/
3098 void
3099 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3100 	bool offset_loaded, u64 *offset, u64 *stat)
3101 {
3102 	u32 new_data;
3103 
3104 	new_data = rd32(hw, reg);
3105 	if (!offset_loaded)
3106 		*offset = new_data;
3107 	if (new_data >= *offset)
3108 		*stat = (u32)(new_data - *offset);
3109 	else
3110 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3111 }
3112 
3113 void
3114 ixl_add_device_sysctls(struct ixl_pf *pf)
3115 {
3116 	device_t dev = pf->dev;
3117 	struct i40e_hw *hw = &pf->hw;
3118 
3119 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3120 	struct sysctl_oid_list *ctx_list =
3121 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3122 
3123 	struct sysctl_oid *debug_node;
3124 	struct sysctl_oid_list *debug_list;
3125 
3126 	struct sysctl_oid *fec_node;
3127 	struct sysctl_oid_list *fec_list;
3128 
3129 	/* Set up sysctls */
3130 	SYSCTL_ADD_PROC(ctx, ctx_list,
3131 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3132 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3133 
3134 	SYSCTL_ADD_PROC(ctx, ctx_list,
3135 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3136 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3137 
3138 	SYSCTL_ADD_PROC(ctx, ctx_list,
3139 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3140 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3141 
3142 	SYSCTL_ADD_PROC(ctx, ctx_list,
3143 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3144 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3145 
3146 	SYSCTL_ADD_PROC(ctx, ctx_list,
3147 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3148 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3149 
3150 	SYSCTL_ADD_PROC(ctx, ctx_list,
3151 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3152 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3153 	    "Queues not allocated to a PF or VF");
3154 
3155 	SYSCTL_ADD_PROC(ctx, ctx_list,
3156 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3157 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3158 	    "Immediately set TX ITR value for all queues");
3159 
3160 	SYSCTL_ADD_PROC(ctx, ctx_list,
3161 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3162 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3163 	    "Immediately set RX ITR value for all queues");
3164 
3165 	SYSCTL_ADD_INT(ctx, ctx_list,
3166 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3167 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3168 
3169 	SYSCTL_ADD_INT(ctx, ctx_list,
3170 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3171 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3172 
3173 	/* Add FEC sysctls for 25G adapters */
3174 	if (i40e_is_25G_device(hw->device_id)) {
3175 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3176 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3177 		fec_list = SYSCTL_CHILDREN(fec_node);
3178 
3179 		SYSCTL_ADD_PROC(ctx, fec_list,
3180 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3181 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3182 
3183 		SYSCTL_ADD_PROC(ctx, fec_list,
3184 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3185 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3186 
3187 		SYSCTL_ADD_PROC(ctx, fec_list,
3188 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3189 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3190 
3191 		SYSCTL_ADD_PROC(ctx, fec_list,
3192 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3193 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3194 
3195 		SYSCTL_ADD_PROC(ctx, fec_list,
3196 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3197 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3198 	}
3199 
3200 	SYSCTL_ADD_PROC(ctx, ctx_list,
3201 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3202 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3203 
3204 	/* Add sysctls meant to print debug information, but don't list them
3205 	 * in "sysctl -a" output. */
3206 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3207 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3208 	debug_list = SYSCTL_CHILDREN(debug_node);
3209 
3210 	SYSCTL_ADD_UINT(ctx, debug_list,
3211 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3212 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3213 
3214 	SYSCTL_ADD_UINT(ctx, debug_list,
3215 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3216 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3217 
3218 	SYSCTL_ADD_PROC(ctx, debug_list,
3219 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3220 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3221 
3222 	SYSCTL_ADD_PROC(ctx, debug_list,
3223 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3224 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3225 
3226 	SYSCTL_ADD_PROC(ctx, debug_list,
3227 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3228 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3229 
3230 	SYSCTL_ADD_PROC(ctx, debug_list,
3231 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3232 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3233 
3234 	SYSCTL_ADD_PROC(ctx, debug_list,
3235 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3236 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3237 
3238 	SYSCTL_ADD_PROC(ctx, debug_list,
3239 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3240 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3241 
3242 	SYSCTL_ADD_PROC(ctx, debug_list,
3243 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3244 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3245 
3246 	SYSCTL_ADD_PROC(ctx, debug_list,
3247 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3248 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3249 
3250 	SYSCTL_ADD_PROC(ctx, debug_list,
3251 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3252 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3253 
3254 	SYSCTL_ADD_PROC(ctx, debug_list,
3255 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3256 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3257 
3258 	SYSCTL_ADD_PROC(ctx, debug_list,
3259 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3260 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3261 
3262 	SYSCTL_ADD_PROC(ctx, debug_list,
3263 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3264 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3265 
3266 	SYSCTL_ADD_PROC(ctx, debug_list,
3267 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3268 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3269 
3270 	SYSCTL_ADD_PROC(ctx, debug_list,
3271 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3272 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3273 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3274 
3275 	SYSCTL_ADD_PROC(ctx, debug_list,
3276 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3277 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3278 
3279 	if (pf->has_i2c) {
3280 		SYSCTL_ADD_PROC(ctx, debug_list,
3281 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3282 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3283 
3284 		SYSCTL_ADD_PROC(ctx, debug_list,
3285 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3286 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3287 
3288 		SYSCTL_ADD_PROC(ctx, debug_list,
3289 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3290 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3291 	}
3292 }
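
/*
 * Since the debug node is created with CTLFLAG_SKIP, its children do
 * not show up in "sysctl -a" output but can still be queried directly,
 * e.g. (assuming unit 0): sysctl dev.ixl.0.debug.link_status
 */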
3293 
3294 /*
3295  * Primarily for finding out how many queues can be assigned to VFs,
3296  * at runtime.
3297  */
3298 static int
3299 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3300 {
3301 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3302 	int queues;
3303 
3304 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3305 
3306 	return sysctl_handle_int(oidp, NULL, queues, req);
3307 }
3308 
3309 /*
3310 ** Set flow control using sysctl:
3311 ** 	0 - off
3312 **	1 - rx pause
3313 **	2 - tx pause
3314 **	3 - full
3315 */
3316 int
3317 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3318 {
3319 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3320 	struct i40e_hw *hw = &pf->hw;
3321 	device_t dev = pf->dev;
3322 	int requested_fc, error = 0;
3323 	enum i40e_status_code aq_error = 0;
3324 	u8 fc_aq_err = 0;
3325 
3326 	/* Get request */
3327 	requested_fc = pf->fc;
3328 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3329 	if ((error) || (req->newptr == NULL))
3330 		return (error);
3331 	if (requested_fc < 0 || requested_fc > 3) {
3332 		device_printf(dev,
3333 		    "Invalid fc mode; valid modes are 0 through 3\n");
3334 		return (EINVAL);
3335 	}
3336 
3337 	/* Set fc ability for port */
3338 	hw->fc.requested_mode = requested_fc;
3339 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3340 	if (aq_error) {
3341 		device_printf(dev,
3342 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3343 		    __func__, aq_error, fc_aq_err);
3344 		return (EIO);
3345 	}
3346 	pf->fc = requested_fc;
3347 
3348 	return (0);
3349 }
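
/*
 * Example (illustrative; assumes the first ixl instance):
 *
 *	# sysctl dev.ixl.0.fc=3
 *
 * requests full (RX and TX pause) flow control on the port.
 */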
3350 
3351 char *
3352 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3353 {
3354 	int index;
3355 
3356 	char *speeds[] = {
3357 		"Unknown",
3358 		"100 Mbps",
3359 		"1 Gbps",
3360 		"10 Gbps",
3361 		"40 Gbps",
3362 		"20 Gbps",
3363 		"25 Gbps",
3364 	};
3365 
3366 	switch (link_speed) {
3367 	case I40E_LINK_SPEED_100MB:
3368 		index = 1;
3369 		break;
3370 	case I40E_LINK_SPEED_1GB:
3371 		index = 2;
3372 		break;
3373 	case I40E_LINK_SPEED_10GB:
3374 		index = 3;
3375 		break;
3376 	case I40E_LINK_SPEED_40GB:
3377 		index = 4;
3378 		break;
3379 	case I40E_LINK_SPEED_20GB:
3380 		index = 5;
3381 		break;
3382 	case I40E_LINK_SPEED_25GB:
3383 		index = 6;
3384 		break;
3385 	case I40E_LINK_SPEED_UNKNOWN:
3386 	default:
3387 		index = 0;
3388 		break;
3389 	}
3390 
3391 	return speeds[index];
3392 }
3393 
3394 int
3395 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3396 {
3397 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3398 	struct i40e_hw *hw = &pf->hw;
3399 	int error = 0;
3400 
3401 	ixl_update_link_status(pf);
3402 
3403 	error = sysctl_handle_string(oidp,
3404 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3405 	    8, req);
3406 	return (error);
3407 }
3408 
3409 /*
3410  * Converts 8-bit speeds value to and from sysctl flags and
3411  * Admin Queue flags.
3412  */
3413 static u8
3414 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3415 {
3416 	static u16 speedmap[6] = {
3417 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3418 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3419 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3420 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3421 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3422 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3423 	};
3424 	u8 retval = 0;
3425 
3426 	for (int i = 0; i < 6; i++) {
3427 		if (to_aq)
3428 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3429 		else
3430 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3431 	}
3432 
3433 	return (retval);
3434 }
3435 
3436 int
3437 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3438 {
3439 	struct i40e_hw *hw = &pf->hw;
3440 	device_t dev = pf->dev;
3441 	struct i40e_aq_get_phy_abilities_resp abilities;
3442 	struct i40e_aq_set_phy_config config;
3443 	enum i40e_status_code aq_error = 0;
3444 
3445 	/* Get current capability information */
3446 	aq_error = i40e_aq_get_phy_capabilities(hw,
3447 	    FALSE, FALSE, &abilities, NULL);
3448 	if (aq_error) {
3449 		device_printf(dev,
3450 		    "%s: Error getting phy capabilities %d,"
3451 		    " aq error: %d\n", __func__, aq_error,
3452 		    hw->aq.asq_last_status);
3453 		return (EIO);
3454 	}
3455 
3456 	/* Prepare new config */
3457 	bzero(&config, sizeof(config));
3458 	if (from_aq)
3459 		config.link_speed = speeds;
3460 	else
3461 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3462 	config.phy_type = abilities.phy_type;
3463 	config.phy_type_ext = abilities.phy_type_ext;
3464 	config.abilities = abilities.abilities
3465 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3466 	config.eee_capability = abilities.eee_capability;
3467 	config.eeer = abilities.eeer_val;
3468 	config.low_power_ctrl = abilities.d3_lpan;
3469 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3470 
3471 	/* Do aq command & restart link */
3472 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3473 	if (aq_error) {
3474 		device_printf(dev,
3475 		    "%s: Error setting new phy config %d,"
3476 		    " aq error: %d\n", __func__, aq_error,
3477 		    hw->aq.asq_last_status);
3478 		return (EIO);
3479 	}
3480 
3481 	return (0);
3482 }
3483 
3484 /*
3485 ** Supported link speeds:
3486 **	Flags:
3487 **	 0x1 - 100 Mb
3488 **	 0x2 - 1G
3489 **	 0x4 - 10G
3490 **	 0x8 - 20G
3491 **	0x10 - 25G
3492 **	0x20 - 40G
3493 */
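/*
 * For example, a returned value of 0x36 (0x20 | 0x10 | 0x04 | 0x02)
 * means the adapter supports the 40G, 25G, 10G and 1G link speeds.
 */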
3494 static int
3495 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3496 {
3497 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3498 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3499 
3500 	return sysctl_handle_int(oidp, NULL, supported, req);
3501 }
3502 
3503 /*
3504 ** Control link advertise speed:
3505 **	Flags:
3506 **	 0x1 - advertise 100 Mb
3507 **	 0x2 - advertise 1G
3508 **	 0x4 - advertise 10G
3509 **	 0x8 - advertise 20G
3510 **	0x10 - advertise 25G
3511 **	0x20 - advertise 40G
3512 **
3513 **	Set to 0 to disable link
3514 */
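/*
 * Illustrative usage (a sketch; "dev.ixl.0.advertise_speed" is an
 * assumed sysctl path for the first port):
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x6	(advertise only 1G and 10G)
 *	# sysctl dev.ixl.0.advertise_speed=0	(disable link)
 */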
3515 int
3516 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3517 {
3518 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3519 	device_t dev = pf->dev;
3520 	u8 converted_speeds;
3521 	int requested_ls = 0;
3522 	int error = 0;
3523 
3524 	/* Read in new mode */
3525 	requested_ls = pf->advertised_speed;
3526 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3527 	if ((error) || (req->newptr == NULL))
3528 		return (error);
3529 
3530 	/* Error out if bits outside of possible flag range are set */
3531 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3532 		device_printf(dev, "Input advertised speed out of range; "
3533 		    "valid flags are: 0x%02x\n",
3534 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3535 		return (EINVAL);
3536 	}
3537 
3538 	/* Check if adapter supports input value */
3539 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3540 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3541 		device_printf(dev, "Invalid advertised speed; "
3542 		    "valid flags are: 0x%02x\n",
3543 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3544 		return (EINVAL);
3545 	}
3546 
3547 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3548 	if (error)
3549 		return (error);
3550 
3551 	pf->advertised_speed = requested_ls;
3552 	ixl_update_link_status(pf);
3553 	return (0);
3554 }
3555 
3556 /*
3557 ** Get the width and transaction speed of
3558 ** the bus this adapter is plugged into.
3559 */
3560 void
3561 ixl_get_bus_info(struct ixl_pf *pf)
3562 {
3563 	struct i40e_hw *hw = &pf->hw;
3564 	device_t dev = pf->dev;
3565 	u16 link;
3566 	u32 offset, num_ports;
3567 	u64 max_speed;
3568 
3569 	/* Some devices don't use PCIE */
3570 	if (hw->mac.type == I40E_MAC_X722)
3571 		return;
3572 
3573 	/* Read PCI Express Capabilities Link Status Register */
3574 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3575 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3576 
3577 	/* Fill out hw struct with PCIE info */
3578 	i40e_set_pci_config_data(hw, link);
3579 
3580 	/* Use info to print out bandwidth messages */
3581 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3582 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3583 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3584 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3585 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3586 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3587 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3588 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3589 	    ("Unknown"));
3590 
3591 	/*
3592 	 * If adapter is in slot with maximum supported speed,
3593 	 * no warning message needs to be printed out.
3594 	 */
3595 	if (hw->bus.speed >= i40e_bus_speed_8000
3596 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3597 		return;
3598 
3599 	num_ports = bitcount32(hw->func_caps.valid_functions);
3600 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3601 
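	/*
	 * Rough heuristic, assuming max_speed is in Mb/s and bus.speed
	 * in MT/s: e.g. two 40G ports want 80000 Mb/s, while an 8.0GT/s
	 * x4 link provides roughly 8000 * 4 = 32000, which would
	 * trigger the warning below.
	 */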
3602 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3603 		device_printf(dev, "PCI-Express bandwidth available"
3604 		    " for this device may be insufficient for"
3605 		    " optimal performance.\n");
3606 		device_printf(dev, "Please move the device to a different"
3607 		    " PCI-e link with more lanes and/or higher"
3608 		    " transfer rate.\n");
3609 	}
3610 }
3611 
3612 static int
3613 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3614 {
3615 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3616 	struct i40e_hw	*hw = &pf->hw;
3617 	struct sbuf	*sbuf;
3618 
3619 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3620 	ixl_nvm_version_str(hw, sbuf);
3621 	sbuf_finish(sbuf);
3622 	sbuf_delete(sbuf);
3623 
3624 	return (0);
3625 }
3626 
3627 void
3628 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3629 {
3630 	if ((nvma->command == I40E_NVM_READ) &&
3631 	    ((nvma->config & 0xFF) == 0xF) &&
3632 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3633 	    (nvma->offset == 0) &&
3634 	    (nvma->data_size == 1)) {
3635 		/* Silently ignore "Get Driver Status" polling commands */
3636 	}
3637 	else if (nvma->command == I40E_NVM_READ) {
3638 		/* Silently ignore other NVM read commands */
3639 	}
3640 	else {
3641 		switch (nvma->command) {
3642 		case 0xB:
3643 			device_printf(dev, "- command: I40E_NVM_READ\n");
3644 			break;
3645 		case 0xC:
3646 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3647 			break;
3648 		default:
3649 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3650 			break;
3651 		}
3652 
3653 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3654 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3655 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3656 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3657 	}
3658 }
3659 
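/*
 * Handle an nvmupdate request delivered via a driver-specific (struct
 * ifdrv) ioctl: validate and copy in the user's i40e_nvm_access request
 * (growing the buffer for older tools that under-report ifd_len), wait
 * out any in-progress adapter reset, hand the request to
 * i40e_nvmupd_command(), and copy the result back to user space.
 */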
3660 int
3661 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3662 {
3663 	struct i40e_hw *hw = &pf->hw;
3664 	struct i40e_nvm_access *nvma;
3665 	device_t dev = pf->dev;
3666 	enum i40e_status_code status = 0;
3667 	size_t nvma_size, ifd_len, exp_len;
3668 	int err, perrno;
3669 
3670 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3671 
3672 	/* Sanity checks */
3673 	nvma_size = sizeof(struct i40e_nvm_access);
3674 	ifd_len = ifd->ifd_len;
3675 
3676 	if (ifd_len < nvma_size ||
3677 	    ifd->ifd_data == NULL) {
3678 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3679 		    __func__);
3680 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3681 		    __func__, ifd_len, nvma_size);
3682 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3683 		    ifd->ifd_data);
3684 		return (EINVAL);
3685 	}
3686 
3687 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3688 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3689 	if (err) {
3690 		device_printf(dev, "%s: Cannot get request from user space\n",
3691 		    __func__);
3692 		free(nvma, M_DEVBUF);
3693 		return (err);
3694 	}
3695 
3696 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3697 		ixl_print_nvm_cmd(dev, nvma);
3698 
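	/* Wait up to 10 seconds (100 x 100 ms) for any adapter reset to finish */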
3699 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3700 		int count = 0;
3701 		while (count++ < 100) {
3702 			i40e_msec_delay(100);
3703 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3704 				break;
3705 		}
3706 	}
3707 
3708 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3709 		free(nvma, M_DEVBUF);
3710 		return (-EBUSY);
3711 	}
3712 
3713 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3714 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3715 		    __func__);
3716 		free(nvma, M_DEVBUF);
3717 		return (EINVAL);
3718 	}
3719 
3720 	/*
3721 	 * Older versions of the NVM update tool don't set ifd_len to the size
3722 	 * of the entire buffer passed to the ioctl. Check the data_size field
3723 	 * in the contained i40e_nvm_access struct and ensure everything is
3724 	 * copied in from userspace.
3725 	 */
3726 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3727 
3728 	if (ifd_len < exp_len) {
3729 		ifd_len = exp_len;
3730 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3731 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3732 		if (err) {
3733 			device_printf(dev, "%s: Cannot get request from user space\n",
3734 					__func__);
3735 			free(nvma, M_DEVBUF);
3736 			return (err);
3737 		}
3738 	}
3739 
3740 	/* TODO: Might need a different lock here */
3741 	/* IXL_PF_LOCK(pf); */
3742 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3743 	/* IXL_PF_UNLOCK(pf); */
3744 
3745 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3746 	free(nvma, M_DEVBUF);
3747 	if (err) {
3748 		device_printf(dev, "%s: Cannot return data to user space\n",
3749 				__func__);
3750 		return (err);
3751 	}
3752 
3753 	/* Let the nvmupdate tool report errors; show them only when debug is enabled */
3754 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3755 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3756 		    i40e_stat_str(hw, status), perrno);
3757 
3758 	/*
3759 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
3760 	 * to restart this ioctl. So return -EACCES instead of -EPERM.
3761 	 */
3762 	if (perrno == -EPERM)
3763 		return (-EACCES);
3764 	else
3765 		return (perrno);
3766 }
3767 
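/*
 * Scan the four I40E_GLGEN_MDIO_I2C_SEL registers for one that has I2C
 * mode enabled and whose PHY port bitmap includes this PF's port;
 * returns that register index, or -1 if no match is found.
 */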
3768 int
3769 ixl_find_i2c_interface(struct ixl_pf *pf)
3770 {
3771 	struct i40e_hw *hw = &pf->hw;
3772 	bool i2c_en, port_matched;
3773 	u32 reg;
3774 
3775 	for (int i = 0; i < 4; i++) {
3776 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3777 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3778 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3779 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3780 		    & BIT(hw->port);
3781 		if (i2c_en && port_matched)
3782 			return (i);
3783 	}
3784 
3785 	return (-1);
3786 }
3787 
3788 static char *
3789 ixl_phy_type_string(u32 bit_pos, bool ext)
3790 {
3791 	static char * phy_types_str[32] = {
3792 		"SGMII",
3793 		"1000BASE-KX",
3794 		"10GBASE-KX4",
3795 		"10GBASE-KR",
3796 		"40GBASE-KR4",
3797 		"XAUI",
3798 		"XFI",
3799 		"SFI",
3800 		"XLAUI",
3801 		"XLPPI",
3802 		"40GBASE-CR4",
3803 		"10GBASE-CR1",
3804 		"SFP+ Active DA",
3805 		"QSFP+ Active DA",
3806 		"Reserved (14)",
3807 		"Reserved (15)",
3808 		"Reserved (16)",
3809 		"100BASE-TX",
3810 		"1000BASE-T",
3811 		"10GBASE-T",
3812 		"10GBASE-SR",
3813 		"10GBASE-LR",
3814 		"10GBASE-SFP+Cu",
3815 		"10GBASE-CR1",
3816 		"40GBASE-CR4",
3817 		"40GBASE-SR4",
3818 		"40GBASE-LR4",
3819 		"1000BASE-SX",
3820 		"1000BASE-LX",
3821 		"1000BASE-T Optical",
3822 		"20GBASE-KR2",
3823 		"Reserved (31)"
3824 	};
3825 	static char * ext_phy_types_str[8] = {
3826 		"25GBASE-KR",
3827 		"25GBASE-CR",
3828 		"25GBASE-SR",
3829 		"25GBASE-LR",
3830 		"25GBASE-AOC",
3831 		"25GBASE-ACC",
3832 		"Reserved (6)",
3833 		"Reserved (7)"
3834 	};
3835 
3836 	if (ext && bit_pos > 7) return "Invalid_Ext";
3837 	if (bit_pos > 31) return "Invalid";
3838 
3839 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3840 }
3841 
3842 /* TODO: ERJ: I don't think this is necessary anymore. */
3843 int
3844 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3845 {
3846 	device_t dev = pf->dev;
3847 	struct i40e_hw *hw = &pf->hw;
3848 	struct i40e_aq_desc desc;
3849 	enum i40e_status_code status;
3850 
3851 	struct i40e_aqc_get_link_status *aq_link_status =
3852 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3853 
3854 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3855 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3856 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3857 	if (status) {
3858 		device_printf(dev,
3859 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3860 		    __func__, i40e_stat_str(hw, status),
3861 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3862 		return (EIO);
3863 	}
3864 
3865 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3866 	return (0);
3867 }
3868 
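/*
 * Link-status PHY type values at or above 0x1F index the extended
 * (25G) table; smaller values index the base table.
 */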
3869 static char *
3870 ixl_phy_type_string_ls(u8 val)
3871 {
3872 	if (val >= 0x1F)
3873 		return ixl_phy_type_string(val - 0x1F, true);
3874 	else
3875 		return ixl_phy_type_string(val, false);
3876 }
3877 
3878 static int
3879 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3880 {
3881 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3882 	device_t dev = pf->dev;
3883 	struct sbuf *buf;
3884 	int error = 0;
3885 
3886 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3887 	if (!buf) {
3888 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3889 		return (ENOMEM);
3890 	}
3891 
3892 	struct i40e_aqc_get_link_status link_status;
3893 	error = ixl_aq_get_link_status(pf, &link_status);
3894 	if (error) {
3895 		sbuf_delete(buf);
3896 		return (error);
3897 	}
3898 
3899 	sbuf_printf(buf, "\n"
3900 	    "PHY Type : 0x%02x<%s>\n"
3901 	    "Speed    : 0x%02x\n"
3902 	    "Link info: 0x%02x\n"
3903 	    "AN info  : 0x%02x\n"
3904 	    "Ext info : 0x%02x\n"
3905 	    "Loopback : 0x%02x\n"
3906 	    "Max Frame: %d\n"
3907 	    "Config   : 0x%02x\n"
3908 	    "Power    : 0x%02x",
3909 	    link_status.phy_type,
3910 	    ixl_phy_type_string_ls(link_status.phy_type),
3911 	    link_status.link_speed,
3912 	    link_status.link_info,
3913 	    link_status.an_info,
3914 	    link_status.ext_info,
3915 	    link_status.loopback,
3916 	    link_status.max_frame_size,
3917 	    link_status.config,
3918 	    link_status.power_desc);
3919 
3920 	error = sbuf_finish(buf);
3921 	if (error)
3922 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3923 
3924 	sbuf_delete(buf);
3925 	return (error);
3926 }
3927 
3928 static int
3929 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3930 {
3931 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3932 	struct i40e_hw *hw = &pf->hw;
3933 	device_t dev = pf->dev;
3934 	enum i40e_status_code status;
3935 	struct i40e_aq_get_phy_abilities_resp abilities;
3936 	struct sbuf *buf;
3937 	int error = 0;
3938 
3939 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3940 	if (!buf) {
3941 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3942 		return (ENOMEM);
3943 	}
3944 
3945 	status = i40e_aq_get_phy_capabilities(hw,
3946 	    FALSE, FALSE, &abilities, NULL);
3947 	if (status) {
3948 		device_printf(dev,
3949 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3950 		    __func__, i40e_stat_str(hw, status),
3951 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3952 		sbuf_delete(buf);
3953 		return (EIO);
3954 	}
3955 
3956 	sbuf_printf(buf, "\n"
3957 	    "PHY Type : %08x",
3958 	    abilities.phy_type);
3959 
3960 	if (abilities.phy_type != 0) {
3961 		sbuf_printf(buf, "<");
3962 		for (int i = 0; i < 32; i++)
3963 			if ((1 << i) & abilities.phy_type)
3964 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3965 		sbuf_printf(buf, ">\n");
3966 	}
3967 
3968 	sbuf_printf(buf, "PHY Ext  : %02x",
3969 	    abilities.phy_type_ext);
3970 
3971 	if (abilities.phy_type_ext != 0) {
3972 		sbuf_printf(buf, "<");
3973 		for (int i = 0; i < 4; i++)
3974 			if ((1 << i) & abilities.phy_type_ext)
3975 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3976 		sbuf_printf(buf, ">");
3977 	}
3978 	sbuf_printf(buf, "\n");
3979 
3980 	sbuf_printf(buf,
3981 	    "Speed    : %02x\n"
3982 	    "Abilities: %02x\n"
3983 	    "EEE cap  : %04x\n"
3984 	    "EEER reg : %08x\n"
3985 	    "D3 Lpan  : %02x\n"
3986 	    "ID       : %02x %02x %02x %02x\n"
3987 	    "ModType  : %02x %02x %02x\n"
3988 	    "ModType E: %01x\n"
3989 	    "FEC Cfg  : %02x\n"
3990 	    "Ext CC   : %02x",
3991 	    abilities.link_speed,
3992 	    abilities.abilities, abilities.eee_capability,
3993 	    abilities.eeer_val, abilities.d3_lpan,
3994 	    abilities.phy_id[0], abilities.phy_id[1],
3995 	    abilities.phy_id[2], abilities.phy_id[3],
3996 	    abilities.module_type[0], abilities.module_type[1],
3997 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3998 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3999 	    abilities.ext_comp_code);
4000 
4001 	error = sbuf_finish(buf);
4002 	if (error)
4003 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4004 
4005 	sbuf_delete(buf);
4006 	return (error);
4007 }
4008 
4009 static int
4010 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4011 {
4012 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4013 	struct ixl_vsi *vsi = &pf->vsi;
4014 	struct ixl_mac_filter *f;
4015 	device_t dev = pf->dev;
4016 	int error = 0, ftl_len = 0, ftl_counter = 0;
4017 
4018 	struct sbuf *buf;
4019 
4020 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4021 	if (!buf) {
4022 		device_printf(dev, "Could not allocate sbuf for output.\n");
4023 		return (ENOMEM);
4024 	}
4025 
4026 	sbuf_printf(buf, "\n");
4027 
4028 	/* Print MAC filters */
4029 	sbuf_printf(buf, "PF Filters:\n");
4030 	SLIST_FOREACH(f, &vsi->ftl, next)
4031 		ftl_len++;
4032 
4033 	if (ftl_len < 1)
4034 		sbuf_printf(buf, "(none)\n");
4035 	else {
4036 		SLIST_FOREACH(f, &vsi->ftl, next) {
4037 			sbuf_printf(buf,
4038 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4039 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4040 			/* don't print '\n' for last entry */
4041 			if (++ftl_counter != ftl_len)
4042 				sbuf_printf(buf, "\n");
4043 		}
4044 	}
4045 
4046 #ifdef PCI_IOV
4047 	/* TODO: Give each VF its own filter list sysctl */
4048 	struct ixl_vf *vf;
4049 	if (pf->num_vfs > 0) {
4050 		sbuf_printf(buf, "\n\n");
4051 		for (int i = 0; i < pf->num_vfs; i++) {
4052 			vf = &pf->vfs[i];
4053 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4054 				continue;
4055 
4056 			vsi = &vf->vsi;
4057 			ftl_len = 0; ftl_counter = 0;
4058 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4059 			SLIST_FOREACH(f, &vsi->ftl, next)
4060 				ftl_len++;
4061 
4062 			if (ftl_len < 1)
4063 				sbuf_printf(buf, "(none)\n");
4064 			else {
4065 				SLIST_FOREACH(f, &vsi->ftl, next) {
4066 					sbuf_printf(buf,
4067 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4068 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4069 				}
4070 			}
4071 		}
4072 	}
4073 #endif
4074 
4075 	error = sbuf_finish(buf);
4076 	if (error)
4077 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4078 	sbuf_delete(buf);
4079 
4080 	return (error);
4081 }
4082 
4083 #define IXL_SW_RES_SIZE 0x14
4084 int
4085 ixl_res_alloc_cmp(const void *a, const void *b)
4086 {
4087 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4088 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4089 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4090 
4091 	return ((int)one->resource_type - (int)two->resource_type);
4092 }
4093 
4094 /*
4095  * Longest string length: 25
4096  */
4097 char *
4098 ixl_switch_res_type_string(u8 type)
4099 {
4100 	/* TODO: Make these const (requires a const char * return type) */
4101 	static char * ixl_switch_res_type_strings[0x14] = {
4102 		"VEB",
4103 		"VSI",
4104 		"Perfect Match MAC address",
4105 		"S-tag",
4106 		"(Reserved)",
4107 		"Multicast hash entry",
4108 		"Unicast hash entry",
4109 		"VLAN",
4110 		"VSI List entry",
4111 		"(Reserved)",
4112 		"VLAN Statistic Pool",
4113 		"Mirror Rule",
4114 		"Queue Set",
4115 		"Inner VLAN Forward filter",
4116 		"(Reserved)",
4117 		"Inner MAC",
4118 		"IP",
4119 		"GRE/VN1 Key",
4120 		"VN2 Key",
4121 		"Tunneling Port"
4122 	};
4123 
4124 	if (type < 0x14)
4125 		return ixl_switch_res_type_strings[type];
4126 	else
4127 		return "(Reserved)";
4128 }
4129 
4130 static int
4131 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4132 {
4133 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4134 	struct i40e_hw *hw = &pf->hw;
4135 	device_t dev = pf->dev;
4136 	struct sbuf *buf;
4137 	enum i40e_status_code status;
4138 	int error = 0;
4139 
4140 	u8 num_entries;
4141 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4142 
4143 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4144 	if (!buf) {
4145 		device_printf(dev, "Could not allocate sbuf for output.\n");
4146 		return (ENOMEM);
4147 	}
4148 
4149 	bzero(resp, sizeof(resp));
4150 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4151 				resp,
4152 				IXL_SW_RES_SIZE,
4153 				NULL);
4154 	if (status) {
4155 		device_printf(dev,
4156 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4157 		    __func__, i40e_stat_str(hw, status),
4158 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4159 		sbuf_delete(buf);
4160 		return (EIO);
4161 	}
4162 
4163 	/* Sort entries by type for display */
4164 	qsort(resp, num_entries,
4165 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4166 	    &ixl_res_alloc_cmp);
4167 
4168 	sbuf_cat(buf, "\n");
4169 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4170 	sbuf_printf(buf,
4171 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4172 	    "                          | (this)     | (all) | (this) | (all)       \n");
4173 	for (int i = 0; i < num_entries; i++) {
4174 		sbuf_printf(buf,
4175 		    "%25s | %10d   %5d   %6d   %12d",
4176 		    ixl_switch_res_type_string(resp[i].resource_type),
4177 		    resp[i].guaranteed,
4178 		    resp[i].total,
4179 		    resp[i].used,
4180 		    resp[i].total_unalloced);
4181 		if (i < num_entries - 1)
4182 			sbuf_cat(buf, "\n");
4183 	}
4184 
4185 	error = sbuf_finish(buf);
4186 	if (error)
4187 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4188 
4189 	sbuf_delete(buf);
4190 	return (error);
4191 }
4192 
4193 /*
4194 ** Caller must init and delete sbuf; this function will clear and
4195 ** finish it for caller.
4196 */
4197 char *
4198 ixl_switch_element_string(struct sbuf *s,
4199     struct i40e_aqc_switch_config_element_resp *element)
4200 {
4201 	sbuf_clear(s);
4202 
4203 	switch (element->element_type) {
4204 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4205 		sbuf_printf(s, "MAC %3d", element->element_info);
4206 		break;
4207 	case I40E_AQ_SW_ELEM_TYPE_PF:
4208 		sbuf_printf(s, "PF  %3d", element->element_info);
4209 		break;
4210 	case I40E_AQ_SW_ELEM_TYPE_VF:
4211 		sbuf_printf(s, "VF  %3d", element->element_info);
4212 		break;
4213 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4214 		sbuf_cat(s, "EMP");
4215 		break;
4216 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4217 		sbuf_cat(s, "BMC");
4218 		break;
4219 	case I40E_AQ_SW_ELEM_TYPE_PV:
4220 		sbuf_cat(s, "PV");
4221 		break;
4222 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4223 		sbuf_cat(s, "VEB");
4224 		break;
4225 	case I40E_AQ_SW_ELEM_TYPE_PA:
4226 		sbuf_cat(s, "PA");
4227 		break;
4228 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4229 		sbuf_printf(s, "VSI %3d", element->element_info);
4230 		break;
4231 	default:
4232 		sbuf_cat(s, "?");
4233 		break;
4234 	}
4235 
4236 	sbuf_finish(s);
4237 	return sbuf_data(s);
4238 }
4239 
4240 static int
4241 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4242 {
4243 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4244 	struct i40e_hw *hw = &pf->hw;
4245 	device_t dev = pf->dev;
4246 	struct sbuf *buf;
4247 	struct sbuf *nmbuf;
4248 	enum i40e_status_code status;
4249 	int error = 0;
4250 	u16 next = 0;
4251 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4252 
4253 	struct i40e_aqc_get_switch_config_resp *sw_config;
4254 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4255 
4256 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4257 	if (!buf) {
4258 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4259 		return (ENOMEM);
4260 	}
4261 
4262 	status = i40e_aq_get_switch_config(hw, sw_config,
4263 	    sizeof(aq_buf), &next, NULL);
4264 	if (status) {
4265 		device_printf(dev,
4266 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4267 		    __func__, i40e_stat_str(hw, status),
4268 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4269 		sbuf_delete(buf);
4270 		return error;
4271 		return (EIO);
4272 	if (next)
4273 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4274 		    __func__, next);
4275 
4276 	nmbuf = sbuf_new_auto();
4277 	if (!nmbuf) {
4278 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4279 		sbuf_delete(buf);
4280 		return (ENOMEM);
4281 	}
4282 
4283 	sbuf_cat(buf, "\n");
4284 	/* Assuming <= 255 elements in switch */
4285 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4286 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4287 	/* Exclude:
4288 	** Revision -- all elements are revision 1 for now
4289 	*/
4290 	sbuf_printf(buf,
4291 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4292 	    "                |          |          | (uplink)\n");
4293 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4294 		/* "%4d (%8s) | %8s   %8s   %#8x" */
4295 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4296 		sbuf_cat(buf, " ");
4297 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4298 		    &sw_config->element[i]));
4299 		sbuf_cat(buf, " | ");
4300 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4301 		sbuf_cat(buf, "   ");
4302 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4303 		sbuf_cat(buf, "   ");
4304 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4305 		if (i < sw_config->header.num_reported - 1)
4306 			sbuf_cat(buf, "\n");
4307 	}
4308 	sbuf_delete(nmbuf);
4309 
4310 	error = sbuf_finish(buf);
4311 	if (error)
4312 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4313 
4314 	sbuf_delete(buf);
4315 
4316 	return (error);
4317 }
4318 
4319 static int
4320 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4321 {
4322 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4323 	struct i40e_hw *hw = &pf->hw;
4324 	device_t dev = pf->dev;
4325 	struct sbuf *buf;
4326 	int error = 0;
4327 	enum i40e_status_code status;
4328 	u32 reg;
4329 
4330 	struct i40e_aqc_get_set_rss_key_data key_data;
4331 
4332 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4333 	if (!buf) {
4334 		device_printf(dev, "Could not allocate sbuf for output.\n");
4335 		return (ENOMEM);
4336 	}
4337 
4338 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4339 
4340 	sbuf_cat(buf, "\n");
4341 	if (hw->mac.type == I40E_MAC_X722) {
4342 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4343 		if (status)
4344 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4345 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4346 	} else {
4347 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4348 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4349 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4350 		}
4351 	}
4352 
4353 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4354 
4355 	error = sbuf_finish(buf);
4356 	if (error)
4357 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4358 	sbuf_delete(buf);
4359 
4360 	return (error);
4361 }
4362 
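/*
 * Hex-dump helper: emits "length" bytes of "buf" in 16-byte rows, each
 * prefixed with its offset (biased by "label_offset"); when "text" is
 * true an ASCII column follows, with non-printable bytes shown as '.'.
 * Illustrative output for one row (contents assumed):
 *
 *	   0 | 69 78 6c 00 ...                                 ixl.
 */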
4363 static void
4364 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4365 {
4366 	int i, j, k, width;
4367 	char c;
4368 
4369 	if (length < 1 || buf == NULL) return;
4370 
4371 	int byte_stride = 16;
4372 	int lines = length / byte_stride;
4373 	int rem = length % byte_stride;
4374 	if (rem > 0)
4375 		lines++;
4376 
4377 	for (i = 0; i < lines; i++) {
4378 		width = (rem > 0 && i == lines - 1)
4379 		    ? rem : byte_stride;
4380 
4381 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4382 
4383 		for (j = 0; j < width; j++)
4384 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4385 
4386 		if (width < byte_stride) {
4387 			for (k = 0; k < (byte_stride - width); k++)
4388 				sbuf_printf(sb, "   ");
4389 		}
4390 
4391 		if (!text) {
4392 			sbuf_printf(sb, "\n");
4393 			continue;
4394 		}
4395 
4396 		for (j = 0; j < width; j++) {
4397 			c = (char)buf[i * byte_stride + j];
4398 			if (c < 32 || c > 126)
4399 				sbuf_printf(sb, ".");
4400 			else
4401 				sbuf_printf(sb, "%c", c);
4402 
4403 			if (j == width - 1)
4404 				sbuf_printf(sb, "\n");
4405 		}
4406 	}
4407 }
4408 
4409 static int
4410 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4411 {
4412 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4413 	struct i40e_hw *hw = &pf->hw;
4414 	device_t dev = pf->dev;
4415 	struct sbuf *buf;
4416 	int error = 0;
4417 	enum i40e_status_code status;
4418 	u8 hlut[512];
4419 	u32 reg;
4420 
4421 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4422 	if (!buf) {
4423 		device_printf(dev, "Could not allocate sbuf for output.\n");
4424 		return (ENOMEM);
4425 	}
4426 
4427 	bzero(hlut, sizeof(hlut));
4428 	sbuf_cat(buf, "\n");
4429 	if (hw->mac.type == I40E_MAC_X722) {
4430 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4431 		if (status)
4432 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4433 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4434 	} else {
4435 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4436 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4437 			bcopy(&reg, &hlut[i << 2], 4);
4438 		}
4439 	}
4440 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4441 
4442 	error = sbuf_finish(buf);
4443 	if (error)
4444 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4445 	sbuf_delete(buf);
4446 
4447 	return (error);
4448 }
4449 
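/*
 * Report the RSS Hash Enable (HENA) bitmask, assembled into a 64-bit
 * value from the two 32-bit I40E_PFQF_HENA registers; each set bit
 * enables RSS hashing for one packet classifier type.
 */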
4450 static int
4451 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4452 {
4453 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4454 	struct i40e_hw *hw = &pf->hw;
4455 	u64 hena;
4456 
4457 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4458 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4459 
4460 	return sysctl_handle_long(oidp, NULL, hena, req);
4461 }
4462 
4463 /*
4464  * Sysctl to disable firmware's link management
4465  *
4466  * 1 - Disable link management on this port
4467  * 0 - Re-enable link management
4468  *
4469  * On normal NVMs, firmware manages link by default.
4470  */
4471 static int
4472 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4473 {
4474 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4475 	struct i40e_hw *hw = &pf->hw;
4476 	device_t dev = pf->dev;
4477 	int requested_mode = -1;
4478 	enum i40e_status_code status = 0;
4479 	int error = 0;
4480 
4481 	/* Read in new mode */
4482 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4483 	if ((error) || (req->newptr == NULL))
4484 		return (error);
4485 	/* Check for sane value */
4486 	if (requested_mode < 0 || requested_mode > 1) {
4487 		device_printf(dev, "Valid modes are 0 or 1\n");
4488 		return (EINVAL);
4489 	}
4490 
4491 	/* Set new mode */
4492 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4493 	if (status) {
4494 		device_printf(dev,
4495 		    "%s: Error setting new phy debug mode %s,"
4496 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4497 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4498 		return (EIO);
4499 	}
4500 
4501 	return (0);
4502 }
4503 
4504 /*
4505  * Read some diagnostic data from an SFP module
4506  * Bytes 96-99, 102-105 from device address 0xA2
4507  */
4508 static int
4509 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4510 {
4511 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4512 	device_t dev = pf->dev;
4513 	struct sbuf *sbuf;
4514 	int error = 0;
4515 	u8 output;
4516 
4517 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4518 	if (error) {
4519 		device_printf(dev, "Error reading from i2c\n");
4520 		return (error);
4521 	}
4522 	if (output != 0x3) {
4523 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4524 		return (EIO);
4525 	}
4526 
4527 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4528 	if (!(output & 0x60)) {
4529 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4530 		return (EIO);
4531 	}
4532 
4533 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4534 
4535 	for (u8 offset = 96; offset < 100; offset++) {
4536 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4537 		sbuf_printf(sbuf, "%02X ", output);
4538 	}
4539 	for (u8 offset = 102; offset < 106; offset++) {
4540 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4541 		sbuf_printf(sbuf, "%02X ", output);
4542 	}
4543 
4544 	sbuf_finish(sbuf);
4545 	sbuf_delete(sbuf);
4546 
4547 	return (0);
4548 }
4549 
4550 /*
4551  * Sysctl to read a byte from I2C bus.
4552  *
4553  * Input: 32-bit value:
4554  * 	bits 0-7:   device address (0xA0 or 0xA2)
4555  * 	bits 8-15:  offset (0-255)
4556  *	bits 16-31: unused
4557  * Output: 8-bit value read
4558  */
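/*
 * Worked input example: to read offset 96 (0x60) from device address
 * 0xA2, write ((0x60 << 8) | 0xA2) = 0x60A2 to this sysctl.
 */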
4559 static int
4560 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4561 {
4562 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4563 	device_t dev = pf->dev;
4564 	int input = -1, error = 0;
4565 	u8 dev_addr, offset, output;
4566 
4567 	/* Read in I2C read parameters */
4568 	error = sysctl_handle_int(oidp, &input, 0, req);
4569 	if ((error) || (req->newptr == NULL))
4570 		return (error);
4571 	/* Validate device address */
4572 	dev_addr = input & 0xFF;
4573 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4574 		return (EINVAL);
4575 	}
4576 	offset = (input >> 8) & 0xFF;
4577 
4578 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4579 	if (error)
4580 		return (error);
4581 
4582 	device_printf(dev, "%02X\n", output);
4583 	return (0);
4584 }
4585 
4586 /*
4587  * Sysctl to write a byte to the I2C bus.
4588  *
4589  * Input: 32-bit value:
4590  * 	bits 0-7:   device address (0xA0 or 0xA2)
4591  * 	bits 8-15:  offset (0-255)
4592  *	bits 16-23: value to write
4593  *	bits 24-31: unused
4594  * Output: 8-bit value written
4595  */
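/*
 * Worked input example: to write the value 0x55 at offset 100 (0x64)
 * of device address 0xA2, use ((0x55 << 16) | (0x64 << 8) | 0xA2)
 * = 0x5564A2.
 */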
4596 static int
4597 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4598 {
4599 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4600 	device_t dev = pf->dev;
4601 	int input = -1, error = 0;
4602 	u8 dev_addr, offset, value;
4603 
4604 	/* Read in I2C write parameters */
4605 	error = sysctl_handle_int(oidp, &input, 0, req);
4606 	if ((error) || (req->newptr == NULL))
4607 		return (error);
4608 	/* Validate device address */
4609 	dev_addr = input & 0xFF;
4610 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4611 		return (EINVAL);
4612 	}
4613 	offset = (input >> 8) & 0xFF;
4614 	value = (input >> 16) & 0xFF;
4615 
4616 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4617 	if (error)
4618 		return (error);
4619 
4620 	device_printf(dev, "%02X written\n", value);
4621 	return (0);
4622 }
4623 
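/*
 * Query the PHY abilities and report through "*is_set" whether the
 * "bit_pos" bit is currently set in the FEC configuration field.
 */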
4624 static int
4625 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4626     u8 bit_pos, int *is_set)
4627 {
4628 	device_t dev = pf->dev;
4629 	struct i40e_hw *hw = &pf->hw;
4630 	enum i40e_status_code status;
4631 
4632 	status = i40e_aq_get_phy_capabilities(hw,
4633 	    FALSE, FALSE, abilities, NULL);
4634 	if (status) {
4635 		device_printf(dev,
4636 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4637 		    __func__, i40e_stat_str(hw, status),
4638 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4639 		return (EIO);
4640 	}
4641 
4642 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4643 	return (0);
4644 }
4645 
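/*
 * Read-modify-write helper for a single FEC bit: clear "bit_pos" in
 * the current fec_cfg_curr_mod_ext_info value, optionally set it
 * again, and only issue a Set PHY Config AQ command when the result
 * differs from the current configuration.
 */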
4646 static int
4647 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4648     u8 bit_pos, int set)
4649 {
4650 	device_t dev = pf->dev;
4651 	struct i40e_hw *hw = &pf->hw;
4652 	struct i40e_aq_set_phy_config config;
4653 	enum i40e_status_code status;
4654 
4655 	/* Set new PHY config */
4656 	memset(&config, 0, sizeof(config));
4657 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4658 	if (set)
4659 		config.fec_config |= bit_pos;
4660 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4661 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4662 		config.phy_type = abilities->phy_type;
4663 		config.phy_type_ext = abilities->phy_type_ext;
4664 		config.link_speed = abilities->link_speed;
4665 		config.eee_capability = abilities->eee_capability;
4666 		config.eeer = abilities->eeer_val;
4667 		config.low_power_ctrl = abilities->d3_lpan;
4668 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4669 
4670 		if (status) {
4671 			device_printf(dev,
4672 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4673 			    __func__, i40e_stat_str(hw, status),
4674 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4675 			return (EIO);
4676 		}
4677 	}
4678 
4679 	return (0);
4680 }
4681 
4682 static int
4683 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4684 {
4685 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4686 	int mode, error = 0;
4687 
4688 	struct i40e_aq_get_phy_abilities_resp abilities;
4689 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4690 	if (error)
4691 		return (error);
4692 	/* Read in new mode */
4693 	error = sysctl_handle_int(oidp, &mode, 0, req);
4694 	if ((error) || (req->newptr == NULL))
4695 		return (error);
4696 
4697 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4698 }
4699 
4700 static int
4701 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4702 {
4703 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4704 	int mode, error = 0;
4705 
4706 	struct i40e_aq_get_phy_abilities_resp abilities;
4707 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4708 	if (error)
4709 		return (error);
4710 	/* Read in new mode */
4711 	error = sysctl_handle_int(oidp, &mode, 0, req);
4712 	if ((error) || (req->newptr == NULL))
4713 		return (error);
4714 
4715 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4716 }
4717 
4718 static int
4719 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4720 {
4721 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4722 	int mode, error = 0;
4723 
4724 	struct i40e_aq_get_phy_abilities_resp abilities;
4725 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4726 	if (error)
4727 		return (error);
4728 	/* Read in new mode */
4729 	error = sysctl_handle_int(oidp, &mode, 0, req);
4730 	if ((error) || (req->newptr == NULL))
4731 		return (error);
4732 
4733 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4734 }
4735 
4736 static int
4737 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4738 {
4739 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4740 	int mode, error = 0;
4741 
4742 	struct i40e_aq_get_phy_abilities_resp abilities;
4743 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4744 	if (error)
4745 		return (error);
4746 	/* Read in new mode */
4747 	error = sysctl_handle_int(oidp, &mode, 0, req);
4748 	if ((error) || (req->newptr == NULL))
4749 		return (error);
4750 
4751 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4752 }
4753 
4754 static int
4755 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4756 {
4757 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4758 	int mode, error = 0;
4759 
4760 	struct i40e_aq_get_phy_abilities_resp abilities;
4761 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4762 	if (error)
4763 		return (error);
4764 	/* Read in new mode */
4765 	error = sysctl_handle_int(oidp, &mode, 0, req);
4766 	if ((error) || (req->newptr == NULL))
4767 		return (error);
4768 
4769 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4770 }
4771 
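/*
 * Dump internal FW debug data (cluster 1) via repeated Debug Dump AQ
 * commands: each table in the cluster is accumulated into a temporary
 * buffer and emitted as hex once FW reports a new table ID; a next
 * table ID of 0xFF marks the end of the cluster.
 */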
4772 static int
4773 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4774 {
4775 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4776 	struct i40e_hw *hw = &pf->hw;
4777 	device_t dev = pf->dev;
4778 	struct sbuf *buf;
4779 	int error = 0;
4780 	enum i40e_status_code status;
4781 
4782 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4783 	if (!buf) {
4784 		device_printf(dev, "Could not allocate sbuf for output.\n");
4785 		return (ENOMEM);
4786 	}
4787 
4788 	u8 *final_buff;
4789 	/* This amount is only necessary if reading the entire cluster into memory */
4790 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4791 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4792 	if (final_buff == NULL) {
4793 		device_printf(dev, "Could not allocate memory for output.\n");
4794 		goto out;
4795 	}
4796 	int final_buff_len = 0;
4797 
4798 	u8 cluster_id = 1;
4799 	bool more = true;
4800 
4801 	u8 dump_buf[4096];
4802 	u16 curr_buff_size = 4096;
4803 	u8 curr_next_table = 0;
4804 	u32 curr_next_index = 0;
4805 
4806 	u16 ret_buff_size;
4807 	u8 ret_next_table;
4808 	u32 ret_next_index;
4809 
4810 	sbuf_cat(buf, "\n");
4811 
4812 	while (more) {
4813 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4814 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4815 		if (status) {
4816 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4817 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4818 			goto free_out;
4819 		}
4820 
4821 		/* copy info out of temp buffer */
4822 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4823 		final_buff_len += ret_buff_size;
4824 
4825 		if (ret_next_table != curr_next_table) {
4826 			/* We're done with the current table; we can dump out read data. */
4827 			sbuf_printf(buf, "%d:", curr_next_table);
4828 			int bytes_printed = 0;
4829 			while (bytes_printed < final_buff_len) {
4830 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4831 				bytes_printed += 16;
4832 			}
4833 			sbuf_cat(buf, "\n");
4834 
4835 			/* The entire cluster has been read; we're finished */
4836 			if (ret_next_table == 0xFF)
4837 				break;
4838 
4839 			/* Otherwise clear the output buffer and continue reading */
4840 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4841 			final_buff_len = 0;
4842 		}
4843 
4844 		if (ret_next_index == 0xFFFFFFFF)
4845 			ret_next_index = 0;
4846 
4847 		bzero(dump_buf, sizeof(dump_buf));
4848 		curr_next_table = ret_next_table;
4849 		curr_next_index = ret_next_index;
4850 	}
4851 
4852 free_out:
4853 	free(final_buff, M_DEVBUF);
4854 out:
4855 	error = sbuf_finish(buf);
4856 	if (error)
4857 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4858 	sbuf_delete(buf);
4859 
4860 	return (error);
4861 }
4862 
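/*
 * Sysctl to control the FW LLDP agent on this port: 0 stops the agent
 * (the driver then re-applies DCB parameters via
 * i40e_aq_set_dcb_parameters()), 1 (re)starts it.
 */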
4863 static int
4864 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4865 {
4866 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4867 	struct i40e_hw *hw = &pf->hw;
4868 	device_t dev = pf->dev;
4869 	int error = 0;
4870 	int state, new_state;
4871 	enum i40e_status_code status;
4872 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4873 
4874 	/* Read in new mode */
4875 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4876 	if ((error) || (req->newptr == NULL))
4877 		return (error);
4878 
4879 	/* Already in requested state */
4880 	if (new_state == state)
4881 		return (error);
4882 
4883 	if (new_state == 0) {
4884 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4885 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4886 			return (EINVAL);
4887 		}
4888 
4889 		if (pf->hw.aq.api_maj_ver < 1 ||
4890 		    (pf->hw.aq.api_maj_ver == 1 &&
4891 		    pf->hw.aq.api_min_ver < 7)) {
4892 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4893 			return (EINVAL);
4894 		}
4895 
4896 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4897 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4898 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4899 	} else {
4900 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4901 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4902 			device_printf(dev, "FW LLDP agent is already running\n");
4903 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4904 	}
4905 
4906 	return (0);
4907 }
4908 
4909 /*
4910  * Get FW LLDP Agent status
4911  */
4912 int
4913 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4914 {
4915 	enum i40e_status_code ret = I40E_SUCCESS;
4916 	struct i40e_lldp_variables lldp_cfg;
4917 	struct i40e_hw *hw = &pf->hw;
4918 	u8 adminstatus = 0;
4919 
4920 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4921 	if (ret)
4922 		return ret;
4923 
4924 	/* Get the LLDP AdminStatus for the current port */
4925 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4926 	adminstatus &= 0xf;
4927 
4928 	/* Check if LLDP agent is disabled */
4929 	if (!adminstatus) {
4930 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4931 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4932 	} else
4933 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4934 
4935 	return (0);
4936 }
4937 
4938 int
4939 ixl_attach_get_link_status(struct ixl_pf *pf)
4940 {
4941 	struct i40e_hw *hw = &pf->hw;
4942 	device_t dev = pf->dev;
4943 	int error = 0;
4944 
4945 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4946 	    (hw->aq.fw_maj_ver < 4)) {
4947 		i40e_msec_delay(75);
4948 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4949 		if (error) {
4950 			device_printf(dev, "link restart failed, aq_err=%d\n",
4951 			    pf->hw.aq.asq_last_status);
4952 			return error;
4953 		}
4954 	}
4955 
4956 	/* Determine link state */
4957 	hw->phy.get_link_info = TRUE;
4958 	i40e_get_link_status(hw, &pf->link_up);
4959 	return (0);
4960 }
4961 
4962 static int
4963 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4964 {
4965 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4966 	int requested = 0, error = 0;
4967 
4968 	/* Read in new mode */
4969 	error = sysctl_handle_int(oidp, &requested, 0, req);
4970 	if ((error) || (req->newptr == NULL))
4971 		return (error);
4972 
4973 	/* Initiate the PF reset later in the admin task */
4974 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4975 
4976 	return (error);
4977 }
4978 
4979 static int
4980 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4981 {
4982 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4983 	struct i40e_hw *hw = &pf->hw;
4984 	int requested = 0, error = 0;
4985 
4986 	/* Read in new mode */
4987 	error = sysctl_handle_int(oidp, &requested, 0, req);
4988 	if ((error) || (req->newptr == NULL))
4989 		return (error);
4990 
4991 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4992 
4993 	return (error);
4994 }
4995 
4996 static int
4997 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4998 {
4999 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5000 	struct i40e_hw *hw = &pf->hw;
5001 	int requested = 0, error = 0;
5002 
5003 	/* Read in new mode */
5004 	error = sysctl_handle_int(oidp, &requested, 0, req);
5005 	if ((error) || (req->newptr == NULL))
5006 		return (error);
5007 
5008 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5009 
5010 	return (error);
5011 }
5012 
5013 static int
5014 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5015 {
5016 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5017 	struct i40e_hw *hw = &pf->hw;
5018 	int requested = 0, error = 0;
5019 
5020 	/* Read in new mode */
5021 	error = sysctl_handle_int(oidp, &requested, 0, req);
5022 	if ((error) || (req->newptr == NULL))
5023 		return (error);
5024 
5025 	/* TODO: Find out how to bypass this */
5026 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5027 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5028 		error = EINVAL;
5029 	} else
5030 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5031 
5032 	return (error);
5033 }
5034 
5035 /*
5036  * Print out the mapping of TX and RX queue indexes
5037  * to MSI-X vectors.
5038  */
5039 static int
5040 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5041 {
5042 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5043 	struct ixl_vsi *vsi = &pf->vsi;
5044 	device_t dev = pf->dev;
5045 	struct sbuf *buf;
5046 	int error = 0;
5047 
5048 	struct ixl_rx_queue *rx_que;
5049 	struct ixl_tx_queue *tx_que;
5050 
5051 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5052 	if (!buf) {
5053 		device_printf(dev, "Could not allocate sbuf for output.\n");
5054 		return (ENOMEM);
5055 	}
5056 
5057 	sbuf_cat(buf, "\n");
5058 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5059 		rx_que = &vsi->rx_queues[i];
5060 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5061 	}
5062 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5063 		tx_que = &vsi->tx_queues[i];
5064 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5065 	}
5066 
5067 	error = sbuf_finish(buf);
5068 	if (error)
5069 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5070 	sbuf_delete(buf);
5071 
5072 	return (error);
5073 }
5074