1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 
50 /* Sysctls */
51 static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
59 
60 /* Debug Sysctls */
61 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
85 #ifdef IXL_DEBUG
86 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
88 #endif
89 
90 #ifdef IXL_IW
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
93 #endif
94 
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96     {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
97 
98 const char * const ixl_fc_string[6] = {
99 	"None",
100 	"Rx",
101 	"Tx",
102 	"Full",
103 	"Priority",
104 	"Default"
105 };
106 
107 static char *ixl_fec_string[3] = {
108        "CL108 RS-FEC",
109        "CL74 FC-FEC/BASE-R",
110        "None"
111 };
112 
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
114 
115 /*
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
117 */
118 void
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
120 {
121 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
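	/*
	 * Worked example (hypothetical value): if hw->nvm.oem_ver were
	 * 0x01230405, the extractions above would yield oem_ver = 0x01,
	 * oem_build = 0x0123 (291) and oem_patch = 0x05, so the string
	 * built below would end in "oem 1.291.5".
	 */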
124 
125 	sbuf_printf(buf,
126 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
129 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 	    IXL_NVM_VERSION_HI_SHIFT,
131 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 	    IXL_NVM_VERSION_LO_SHIFT,
133 	    hw->nvm.eetrack,
134 	    oem_ver, oem_build, oem_patch);
135 }
136 
137 void
138 ixl_print_nvm_version(struct ixl_pf *pf)
139 {
140 	struct i40e_hw *hw = &pf->hw;
141 	device_t dev = pf->dev;
142 	struct sbuf *sbuf;
143 
144 	sbuf = sbuf_new_auto();
145 	ixl_nvm_version_str(hw, sbuf);
146 	sbuf_finish(sbuf);
147 	device_printf(dev, "%s\n", sbuf_data(sbuf));
148 	sbuf_delete(sbuf);
149 }
150 
151 static void
152 ixl_configure_tx_itr(struct ixl_pf *pf)
153 {
154 	struct i40e_hw		*hw = &pf->hw;
155 	struct ixl_vsi		*vsi = &pf->vsi;
156 	struct ixl_tx_queue	*que = vsi->tx_queues;
157 
158 	vsi->tx_itr_setting = pf->tx_itr;
159 
160 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 		struct tx_ring	*txr = &que->txr;
162 
163 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 		    vsi->tx_itr_setting);
165 		txr->itr = vsi->tx_itr_setting;
166 		txr->latency = IXL_AVE_LATENCY;
167 	}
168 }
169 
170 static void
171 ixl_configure_rx_itr(struct ixl_pf *pf)
172 {
173 	struct i40e_hw		*hw = &pf->hw;
174 	struct ixl_vsi		*vsi = &pf->vsi;
175 	struct ixl_rx_queue	*que = vsi->rx_queues;
176 
177 	vsi->rx_itr_setting = pf->rx_itr;
178 
179 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 		struct rx_ring 	*rxr = &que->rxr;
181 
182 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 		    vsi->rx_itr_setting);
184 		rxr->itr = vsi->rx_itr_setting;
185 		rxr->latency = IXL_AVE_LATENCY;
186 	}
187 }
188 
189 /*
190  * Write PF ITR values to queue ITR registers.
191  */
192 void
193 ixl_configure_itr(struct ixl_pf *pf)
194 {
195 	ixl_configure_tx_itr(pf);
196 	ixl_configure_rx_itr(pf);
197 }
198 
199 /*********************************************************************
200  *
201  *  Get the hardware capabilities
202  *
203  **********************************************************************/
204 
205 int
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
207 {
208 	struct i40e_aqc_list_capabilities_element_resp *buf;
209 	struct i40e_hw	*hw = &pf->hw;
210 	device_t 	dev = pf->dev;
211 	enum i40e_status_code status;
212 	int len, i2c_intfc_num;
213 	bool again = TRUE;
214 	u16 needed;
215 
216 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
217 retry:
218 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 		device_printf(dev, "Unable to allocate cap memory\n");
221 		return (ENOMEM);
222 	}
223 
224 	/* This populates the hw struct */
225 	status = i40e_aq_discover_capabilities(hw, buf, len,
226 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
227 	free(buf, M_DEVBUF);
228 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
229 	    (again == TRUE)) {
230 		/* retry once with a larger buffer */
231 		again = FALSE;
232 		len = needed;
233 		goto retry;
234 	} else if (status != I40E_SUCCESS) {
235 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
237 		return (ENODEV);
238 	}
239 
240 	/*
241 	 * Some devices have both MDIO and I2C; since this isn't reported
242 	 * by the FW, check registers to see if an I2C interface exists.
243 	 */
244 	i2c_intfc_num = ixl_find_i2c_interface(pf);
245 	if (i2c_intfc_num != -1)
246 		pf->has_i2c = true;
247 
248 	/* Determine functions to use for driver I2C accesses */
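	/*
	 * Mapping of the cases below: 0 selects a sensible default (the
	 * Admin Queue method when the XL710 firmware API is 1.7 or newer,
	 * register access otherwise); 1 forces bit-banging, 2 the I2C
	 * command register method, and 3 the Admin Queue method.
	 */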
249 	switch (pf->i2c_access_method) {
250 	case 0: {
251 		if (hw->mac.type == I40E_MAC_XL710 &&
252 		    hw->aq.api_maj_ver == 1 &&
253 		    hw->aq.api_min_ver >= 7) {
254 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
256 		} else {
257 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
259 		}
260 		break;
261 	}
262 	case 3:
263 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
265 		break;
266 	case 2:
267 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
269 		break;
270 	case 1:
271 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
273 		break;
274 	default:
275 		/* Should not happen */
276 		device_printf(dev, "Error setting I2C access functions\n");
277 		break;
278 	}
279 
280 	/* Print a subset of the capability information. */
281 	device_printf(dev,
282 	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
288 	    "MDIO shared");
289 
290 	return (0);
291 }
292 
293 /* For the set_advertise sysctl */
294 void
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
296 {
297 	device_t dev = pf->dev;
298 	int err;
299 
300 	/* Make sure to initialize the device to the complete list of
301 	 * supported speeds on driver load, to ensure unloading and
302 	 * reloading the driver will restore this value.
303 	 */
304 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
305 	if (err) {
306 		/* Non-fatal error */
307 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
308 			      __func__, err);
309 		return;
310 	}
311 
312 	pf->advertised_speed =
313 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
314 }
315 
316 int
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
318 {
319 	enum i40e_status_code status = 0;
320 	struct i40e_hw *hw = &pf->hw;
321 	device_t dev = pf->dev;
322 
323 	/* Shutdown LAN HMC */
324 	if (hw->hmc.hmc_obj) {
325 		status = i40e_shutdown_lan_hmc(hw);
326 		if (status) {
327 			device_printf(dev,
328 			    "init: LAN HMC shutdown failure; status %s\n",
329 			    i40e_stat_str(hw, status));
330 			goto err_out;
331 		}
332 	}
333 
334 	/* Shutdown admin queue */
335 	ixl_disable_intr0(hw);
336 	status = i40e_shutdown_adminq(hw);
337 	if (status)
338 		device_printf(dev,
339 		    "init: Admin Queue shutdown failure; status %s\n",
340 		    i40e_stat_str(hw, status));
341 
342 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
343 err_out:
344 	return (status);
345 }
346 
347 int
348 ixl_reset(struct ixl_pf *pf)
349 {
350 	struct i40e_hw *hw = &pf->hw;
351 	device_t dev = pf->dev;
352 	u32 reg;
353 	int error = 0;
354 
355 	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
356 	i40e_clear_hw(hw);
357 	error = i40e_pf_reset(hw);
358 	if (error) {
359 		device_printf(dev, "init: PF reset failure\n");
360 		error = EIO;
361 		goto err_out;
362 	}
363 
364 	error = i40e_init_adminq(hw);
365 	if (error) {
366 		device_printf(dev, "init: Admin queue init failure;"
367 		    " status code %d\n", error);
368 		error = EIO;
369 		goto err_out;
370 	}
371 
372 	i40e_clear_pxe_mode(hw);
373 
374 #if 0
375 	error = ixl_get_hw_capabilities(pf);
376 	if (error) {
377 		device_printf(dev, "init: Error retrieving HW capabilities;"
378 		    " status code %d\n", error);
379 		goto err_out;
380 	}
381 
382 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 	    hw->func_caps.num_rx_qp, 0, 0);
384 	if (error) {
385 		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
386 		    error);
387 		error = EIO;
388 		goto err_out;
389 	}
390 
391 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
392 	if (error) {
393 		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
394 		    error);
395 		error = EIO;
396 		goto err_out;
397 	}
398 
399 	// XXX: possible fix for panic, but our failure recovery is still broken
400 	error = ixl_switch_config(pf);
401 	if (error) {
402 		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
403 		     error);
404 		goto err_out;
405 	}
406 
407 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
408 	    NULL);
409         if (error) {
410 		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 		    " aq_err %d\n", error, hw->aq.asq_last_status);
412 		error = EIO;
413 		goto err_out;
414 	}
415 
416 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
417 	if (error) {
418 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
420 		goto err_out;
421 	}
422 
423 	// XXX: (Rebuild VSIs?)
424 
425 	/* Firmware delay workaround */
426 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 	    (hw->aq.fw_maj_ver < 4)) {
428 		i40e_msec_delay(75);
429 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
430 		if (error) {
431 			device_printf(dev, "init: link restart failed, aq_err %d\n",
432 			    hw->aq.asq_last_status);
433 			goto err_out;
434 		}
435 	}
436 
437 
438 	/* Re-enable admin queue interrupt */
439 	if (pf->msix > 1) {
440 		ixl_configure_intr0_msix(pf);
441 		ixl_enable_intr0(hw);
442 	}
443 
444 err_out:
445 	return (error);
446 #endif
447 	ixl_rebuild_hw_structs_after_reset(pf);
448 
449 	/* The PF reset should have cleared any critical errors */
450 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
452 
453 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 	reg |= IXL_ICR0_CRIT_ERR_MASK;
455 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
456 
457  err_out:
458  	return (error);
459 }
460 
461 /*
462  * TODO: Make sure this properly handles admin queue / single rx queue intr
463  */
464 int
465 ixl_intr(void *arg)
466 {
467 	struct ixl_pf		*pf = arg;
468 	struct i40e_hw		*hw =  &pf->hw;
469 	struct ixl_vsi		*vsi = &pf->vsi;
470 	struct ixl_rx_queue	*que = vsi->rx_queues;
471 	u32			icr0;
472 
473 	// pf->admin_irq++
474 	++que->irqs;
475 
476 // TODO: Check against proper field
477 #if 0
478 	/* Clear PBA at start of ISR if using legacy interrupts */
479 	if (pf->msix == 0)
480 		wr32(hw, I40E_PFINT_DYN_CTL0,
481 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
483 #endif
484 
485 	icr0 = rd32(hw, I40E_PFINT_ICR0);
486 
487 
488 #ifdef PCI_IOV
489 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 		iflib_iov_intr_deferred(vsi->ctx);
491 #endif
492 
493 	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 		iflib_admin_intr_deferred(vsi->ctx);
496 
497 	// TODO: Is intr0 enabled somewhere else?
498 	ixl_enable_intr0(hw);
499 
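	/*
	 * FILTER_SCHEDULE_THREAD asks iflib to run the deferred queue
	 * handler; FILTER_HANDLED means no further work is needed.
	 */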
500 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 		return (FILTER_SCHEDULE_THREAD);
502 	else
503 		return (FILTER_HANDLED);
504 }
505 
506 
507 /*********************************************************************
508  *
509  *  MSI-X VSI Interrupt Service routine
510  *
511  **********************************************************************/
512 int
513 ixl_msix_que(void *arg)
514 {
515 	struct ixl_rx_queue *rx_que = arg;
516 
517 	++rx_que->irqs;
518 
519 	ixl_set_queue_rx_itr(rx_que);
520 	// ixl_set_queue_tx_itr(que);
521 
522 	return (FILTER_SCHEDULE_THREAD);
523 }
524 
525 
526 /*********************************************************************
527  *
528  *  MSI-X Admin Queue Interrupt Service routine
529  *
530  **********************************************************************/
531 int
532 ixl_msix_adminq(void *arg)
533 {
534 	struct ixl_pf	*pf = arg;
535 	struct i40e_hw	*hw = &pf->hw;
536 	device_t	dev = pf->dev;
537 	u32		reg, mask, rstat_reg;
538 	bool		do_task = FALSE;
539 
540 	DDPRINTF(dev, "begin");
541 
542 	++pf->admin_irq;
543 
544 	reg = rd32(hw, I40E_PFINT_ICR0);
545 	/*
546 	 * For masking off interrupt causes that need to be handled before
547 	 * they can be re-enabled
548 	 */
549 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
550 
551 	/* Check on the cause */
552 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
554 		do_task = TRUE;
555 	}
556 
557 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
560 		do_task = TRUE;
561 	}
562 
563 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 		device_printf(dev, "Reset Requested!\n");
566 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 		device_printf(dev, "Reset type: ");
570 		switch (rstat_reg) {
571 		/* These others might be handled similarly to an EMPR reset */
572 		case I40E_RESET_CORER:
573 			printf("CORER\n");
574 			break;
575 		case I40E_RESET_GLOBR:
576 			printf("GLOBR\n");
577 			break;
578 		case I40E_RESET_EMPR:
579 			printf("EMPR\n");
580 			break;
581 		default:
582 			printf("POR\n");
583 			break;
584 		}
585 		/* overload admin queue task to check reset progress */
586 		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
587 		do_task = TRUE;
588 	}
589 
590 	/*
591 	 * PE / PCI / ECC exceptions are all handled in the same way:
592 	 * mask out these three causes, then request a PF reset
593 	 *
594 	 * TODO: I think at least ECC error requires a GLOBR, not PFR
595 	 */
596 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 		device_printf(dev, "ECC Error detected!\n");
598 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 		device_printf(dev, "PCI Exception detected!\n");
600 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 	/* Checks against the conditions above */
603 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 		atomic_set_32(&pf->state,
606 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
607 		do_task = TRUE;
608 	}
609 
610 	// TODO: Linux driver never re-enables this interrupt once it has been detected
611 	// Then what is supposed to happen? A PF reset? Should it never happen?
612 	// TODO: Parse out this error into something human readable
613 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 			device_printf(dev, "HMC Error detected!\n");
617 			device_printf(dev, "INFO 0x%08x\n", reg);
618 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 			device_printf(dev, "DATA 0x%08x\n", reg);
620 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
621 		}
622 	}
623 
624 #ifdef PCI_IOV
625 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 		iflib_iov_intr_deferred(pf->vsi.ctx);
628 	}
629 #endif
630 
631 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 	ixl_enable_intr0(hw);
633 
634 	if (do_task)
635 		return (FILTER_SCHEDULE_THREAD);
636 	else
637 		return (FILTER_HANDLED);
638 }
639 
640 static u_int
641 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
642 {
643 	struct ixl_vsi *vsi = arg;
644 
645 	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
646 
647 	return (1);
648 }
649 
650 /*********************************************************************
651  * 	Filter Routines
652  *
653  *	Routines for multicast and vlan filter management.
654  *
655  *********************************************************************/
656 void
657 ixl_add_multi(struct ixl_vsi *vsi)
658 {
659 	struct ifnet		*ifp = vsi->ifp;
660 	struct i40e_hw		*hw = vsi->hw;
661 	int			mcnt = 0, flags;
662 
663 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
664 
665 	/*
666 	** First just get a count, to decide if we
667 	** should simply use multicast promiscuous mode.
668 	*/
669 	mcnt = if_llmaddr_count(ifp);
670 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
671 		/* delete existing MC filters */
672 		ixl_del_hw_filters(vsi, mcnt);
673 		i40e_aq_set_vsi_multicast_promiscuous(hw,
674 		    vsi->seid, TRUE, NULL);
675 		return;
676 	}
677 
678 	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
679 	if (mcnt > 0) {
680 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
681 		ixl_add_hw_filters(vsi, flags, mcnt);
682 	}
683 
684 	IOCTL_DEBUGOUT("ixl_add_multi: end");
685 }
686 
687 static u_int
688 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
689 {
690 	struct ixl_mac_filter *f = arg;
691 
692 	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
693 		return (1);
694 	else
695 		return (0);
696 }
697 
698 int
699 ixl_del_multi(struct ixl_vsi *vsi)
700 {
701 	struct ifnet		*ifp = vsi->ifp;
702 	struct ixl_mac_filter	*f;
703 	int			mcnt = 0;
704 
705 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
706 
707 	SLIST_FOREACH(f, &vsi->ftl, next)
708 		if ((f->flags & IXL_FILTER_USED) &&
709 		    (f->flags & IXL_FILTER_MC) &&
710 		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
711 			f->flags |= IXL_FILTER_DEL;
712 			mcnt++;
713 		}
714 
715 	if (mcnt > 0)
716 		ixl_del_hw_filters(vsi, mcnt);
717 
718 	return (mcnt);
719 }
720 
721 void
722 ixl_link_up_msg(struct ixl_pf *pf)
723 {
724 	struct i40e_hw *hw = &pf->hw;
725 	struct ifnet *ifp = pf->vsi.ifp;
726 	char *req_fec_string, *neg_fec_string;
727 	u8 fec_abilities;
728 
729 	fec_abilities = hw->phy.link_info.req_fec_info;
730 	/* If both RS and KR are requested, only show RS */
731 	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
732 		req_fec_string = ixl_fec_string[0];
733 	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
734 		req_fec_string = ixl_fec_string[1];
735 	else
736 		req_fec_string = ixl_fec_string[2];
737 
738 	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
739 		neg_fec_string = ixl_fec_string[0];
740 	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
741 		neg_fec_string = ixl_fec_string[1];
742 	else
743 		neg_fec_string = ixl_fec_string[2];
744 
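	/*
	 * The nested conditionals below map the negotiated pause bits onto
	 * ixl_fc_string: TX and RX pause together select "Full", TX alone
	 * "Tx", RX alone "Rx", and neither "None".
	 */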
745 	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
746 	    ifp->if_xname,
747 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
748 	    req_fec_string, neg_fec_string,
749 	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
750 	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
751 	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
752 		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
753 		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
754 		ixl_fc_string[1] : ixl_fc_string[0]);
755 }
756 
757 /*
758  * Configure admin queue/misc interrupt cause registers in hardware.
759  */
760 void
761 ixl_configure_intr0_msix(struct ixl_pf *pf)
762 {
763 	struct i40e_hw *hw = &pf->hw;
764 	u32 reg;
765 
766 	/* First set up the adminq - vector 0 */
767 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
768 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
769 
770 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
771 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
772 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
773 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
774 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
775 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
776 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
777 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
778 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
779 
780 	/*
781 	 * 0x7FF is the end of the queue list.
782 	 * This means we won't use MSI-X vector 0 for a queue interrupt
783 	 * in MSI-X mode.
784 	 */
785 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
786 	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
787 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
788 
789 	wr32(hw, I40E_PFINT_DYN_CTL0,
790 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
791 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
792 
793 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
794 }
795 
796 /*
797  * Configure queue interrupt cause registers in hardware.
798  *
799  * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
800  */
801 void
802 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
803 {
804 	struct i40e_hw *hw = &pf->hw;
805 	struct ixl_vsi *vsi = &pf->vsi;
806 	u32		reg;
807 	u16		vector = 1;
808 
809 	// TODO: See if max is really necessary
810 	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
811 		/* Make sure interrupt is disabled */
812 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
813 		/* Set linked list head to point to corresponding RX queue
814 		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
815 		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
816 		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
817 		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
818 		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
819 		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
820 
821 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
822 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
823 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
824 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
825 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
826 		wr32(hw, I40E_QINT_RQCTL(i), reg);
827 
828 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
829 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
830 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
831 		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
832 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
833 		wr32(hw, I40E_QINT_TQCTL(i), reg);
834 	}
835 }
836 
837 /*
838  * Configure for single interrupt vector operation
839  */
840 void
841 ixl_configure_legacy(struct ixl_pf *pf)
842 {
843 	struct i40e_hw	*hw = &pf->hw;
844 	struct ixl_vsi	*vsi = &pf->vsi;
845 	u32 reg;
846 
847 // TODO: Fix
848 #if 0
849 	/* Configure ITR */
850 	vsi->tx_itr_setting = pf->tx_itr;
851 	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
852 	    vsi->tx_itr_setting);
853 	txr->itr = vsi->tx_itr_setting;
854 
855 	vsi->rx_itr_setting = pf->rx_itr;
856 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
857 	    vsi->rx_itr_setting);
858 	rxr->itr = vsi->rx_itr_setting;
859 	/* XXX: Assuming only 1 queue in single interrupt mode */
860 #endif
861 	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
862 
863 	/* Setup "other" causes */
864 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
865 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
866 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
867 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
868 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
869 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
870 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
871 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
872 	    ;
873 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
874 
875 	/* No ITR for non-queue interrupts */
876 	wr32(hw, I40E_PFINT_STAT_CTL0,
877 	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
878 
879 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
880 	wr32(hw, I40E_PFINT_LNKLST0, 0);
881 
882 	/* Associate the queue pair to the vector and enable the q int */
883 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
884 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
885 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
886 	wr32(hw, I40E_QINT_RQCTL(0), reg);
887 
888 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
889 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
890 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
891 	wr32(hw, I40E_QINT_TQCTL(0), reg);
892 }
893 
894 void
895 ixl_free_pci_resources(struct ixl_pf *pf)
896 {
897 	struct ixl_vsi		*vsi = &pf->vsi;
898 	device_t		dev = iflib_get_dev(vsi->ctx);
899 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
900 
901 	/* We may get here before stations are set up */
902 	if (rx_que == NULL)
903 		goto early;
904 
905 	/*
906 	**  Release all MSI-X VSI resources:
907 	*/
908 	iflib_irq_free(vsi->ctx, &vsi->irq);
909 
910 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
911 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
912 early:
913 	if (pf->pci_mem != NULL)
914 		bus_release_resource(dev, SYS_RES_MEMORY,
915 		    rman_get_rid(pf->pci_mem), pf->pci_mem);
916 }
917 
918 void
919 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
920 {
921 	/* Display supported media types */
922 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
923 		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
924 
925 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
926 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
927 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
928 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
929 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
930 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
931 
932 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
933 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
934 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
935 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
936 
937 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
938 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
939 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
940 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
941 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
942 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
943 
944 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
945 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
946 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
947 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
948 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
949 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
950 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
951 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
952 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
953 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
954 
955 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
956 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
957 
958 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
959 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
960 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
961 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
962 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
963 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
964 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
965 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
966 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
967 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
968 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
969 
970 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
971 		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
972 
973 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
974 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
975 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
976 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
977 
978 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
979 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
980 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
981 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
982 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
983 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
984 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
985 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
986 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
987 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
988 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
989 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
990 }
991 
992 /*********************************************************************
993  *
994  *  Setup networking device structure and register an interface.
995  *
996  **********************************************************************/
997 int
998 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
999 {
1000 	struct ixl_vsi *vsi = &pf->vsi;
1001 	if_ctx_t ctx = vsi->ctx;
1002 	struct i40e_hw *hw = &pf->hw;
1003 	struct ifnet *ifp = iflib_get_ifp(ctx);
1004 	struct i40e_aq_get_phy_abilities_resp abilities;
1005 	enum i40e_status_code aq_error = 0;
1006 
1007 	INIT_DBG_DEV(dev, "begin");
1008 
1009 	vsi->shared->isc_max_frame_size =
1010 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1011 	    + ETHER_VLAN_ENCAP_LEN;
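	/*
	 * For example, the standard 1500-byte MTU yields a max frame size
	 * of 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
	 */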
1012 
1013 	aq_error = i40e_aq_get_phy_capabilities(hw,
1014 	    FALSE, TRUE, &abilities, NULL);
1015 	/* May need delay to detect fiber correctly */
1016 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1017 		/* TODO: Maybe just retry this in a task... */
1018 		i40e_msec_delay(200);
1019 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1020 		    TRUE, &abilities, NULL);
1021 	}
1022 	if (aq_error) {
1023 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1024 			device_printf(dev, "Unknown PHY type detected!\n");
1025 		else
1026 			device_printf(dev,
1027 			    "Error getting supported media types, err %d,"
1028 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1029 	} else {
1030 		pf->supported_speeds = abilities.link_speed;
1031 #if __FreeBSD_version >= 1100000
1032 		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1033 #else
1034 		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1035 #endif
1036 
1037 		ixl_add_ifmedia(vsi, hw->phy.phy_types);
1038 	}
1039 
1040 	/* Use autoselect media by default */
1041 	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1042 	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1043 
1044 	return (0);
1045 }
1046 
1047 /*
1048  * Input: bitmap of enum i40e_aq_link_speed
1049  */
1050 u64
1051 ixl_max_aq_speed_to_value(u8 link_speeds)
1052 {
1053 	if (link_speeds & I40E_LINK_SPEED_40GB)
1054 		return IF_Gbps(40);
1055 	if (link_speeds & I40E_LINK_SPEED_25GB)
1056 		return IF_Gbps(25);
1057 	if (link_speeds & I40E_LINK_SPEED_20GB)
1058 		return IF_Gbps(20);
1059 	if (link_speeds & I40E_LINK_SPEED_10GB)
1060 		return IF_Gbps(10);
1061 	if (link_speeds & I40E_LINK_SPEED_1GB)
1062 		return IF_Gbps(1);
1063 	if (link_speeds & I40E_LINK_SPEED_100MB)
1064 		return IF_Mbps(100);
1065 	else
1066 		/* Minimum supported link speed */
1067 		return IF_Mbps(100);
1068 }
1069 
1070 /*
1071 ** Run when the Admin Queue gets a link state change interrupt.
1072 */
1073 void
1074 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1075 {
1076 	struct i40e_hw *hw = &pf->hw;
1077 	device_t dev = iflib_get_dev(pf->vsi.ctx);
1078 	struct i40e_aqc_get_link_status *status =
1079 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1080 
1081 	/* Request link status from adapter */
1082 	hw->phy.get_link_info = TRUE;
1083 	i40e_get_link_status(hw, &pf->link_up);
1084 
1085 	/* Print out message if an unqualified module is found */
1086 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1087 	    (pf->advertised_speed) &&
1088 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1089 	    (!(status->link_info & I40E_AQ_LINK_UP)))
1090 		device_printf(dev, "Link failed because "
1091 		    "an unqualified module was detected!\n");
1092 
1093 	/* OS link info is updated elsewhere */
1094 }
1095 
1096 /*********************************************************************
1097  *
1098  *  Get Firmware Switch configuration
1099  *	- this will need to be more robust when more complex
1100  *	  switch configurations are enabled.
1101  *
1102  **********************************************************************/
1103 int
1104 ixl_switch_config(struct ixl_pf *pf)
1105 {
1106 	struct i40e_hw	*hw = &pf->hw;
1107 	struct ixl_vsi	*vsi = &pf->vsi;
1108 	device_t 	dev = iflib_get_dev(vsi->ctx);
1109 	struct i40e_aqc_get_switch_config_resp *sw_config;
1110 	u8	aq_buf[I40E_AQ_LARGE_BUF];
1111 	int	ret;
1112 	u16	next = 0;
1113 
1114 	memset(&aq_buf, 0, sizeof(aq_buf));
1115 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1116 	ret = i40e_aq_get_switch_config(hw, sw_config,
1117 	    sizeof(aq_buf), &next, NULL);
1118 	if (ret) {
1119 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
1120 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1121 		return (ret);
1122 	}
1123 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1124 		device_printf(dev,
1125 		    "Switch config: header reported: %d in structure, %d total\n",
1126 		    LE16_TO_CPU(sw_config->header.num_reported),
1127 		    LE16_TO_CPU(sw_config->header.num_total));
1128 		for (int i = 0;
1129 		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
1130 			device_printf(dev,
1131 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1132 			    sw_config->element[i].element_type,
1133 			    LE16_TO_CPU(sw_config->element[i].seid),
1134 			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
1135 			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
1136 		}
1137 	}
1138 	/* Simplified due to a single VSI */
1139 	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
1140 	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
1141 	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
1142 	return (ret);
1143 }
1144 
1145 /*********************************************************************
1146  *
1147  *  Initialize the VSI:  this handles contexts, which means things
1148  *  			 like the number of descriptors, buffer size,
1149  *			 plus we init the rings thru this function.
1150  *			 plus we init the rings through this function.
1151  **********************************************************************/
1152 int
1153 ixl_initialize_vsi(struct ixl_vsi *vsi)
1154 {
1155 	struct ixl_pf *pf = vsi->back;
1156 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
1157 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
1158 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1159 	device_t		dev = iflib_get_dev(vsi->ctx);
1160 	struct i40e_hw		*hw = vsi->hw;
1161 	struct i40e_vsi_context	ctxt;
1162 	int 			tc_queues;
1163 	int			err = 0;
1164 
1165 	memset(&ctxt, 0, sizeof(ctxt));
1166 	ctxt.seid = vsi->seid;
1167 	if (pf->veb_seid != 0)
1168 		ctxt.uplink_seid = pf->veb_seid;
1169 	ctxt.pf_num = hw->pf_id;
1170 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1171 	if (err) {
1172 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1173 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1174 		return (err);
1175 	}
1176 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1177 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1178 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1179 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1180 	    ctxt.uplink_seid, ctxt.vsi_number,
1181 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
1182 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1183 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1184 	/*
1185 	** Set the queue and traffic class bits
1186 	**  - when multiple traffic classes are supported
1187 	**    this will need to be more robust.
1188 	*/
1189 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1190 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1191 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
1192 	ctxt.info.queue_mapping[0] = 0;
1193 	/*
1194 	 * This VSI will only use traffic class 0; start traffic class 0's
1195 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1196 	 * the driver may not use all of them).
1197 	 */
1198 	tc_queues = fls(pf->qtag.num_allocated) - 1;
1199 	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1200 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1201 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1202 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
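	/*
	 * For example, if pf->qtag.num_allocated is 8, then
	 * fls(8) - 1 == 3, and traffic class 0 is sized for 2^3 = 8 queues
	 * starting at pf->qtag.first_qidx.
	 */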
1203 
1204 	/* Set VLAN receive stripping mode */
1205 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1206 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1207 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1208 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1209 	else
1210 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1211 
1212 #ifdef IXL_IW
1213 	/* Set TCP Enable for iWARP capable VSI */
1214 	if (ixl_enable_iwarp && pf->iw_enabled) {
1215 		ctxt.info.valid_sections |=
1216 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1217 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1218 	}
1219 #endif
1220 	/* Save VSI number and info for use later */
1221 	vsi->vsi_num = ctxt.vsi_number;
1222 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1223 
1224 	/* Reset VSI statistics */
1225 	ixl_vsi_reset_stats(vsi);
1226 	vsi->hw_filters_add = 0;
1227 	vsi->hw_filters_del = 0;
1228 
1229 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1230 
1231 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1232 	if (err) {
1233 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1234 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1235 		return (err);
1236 	}
1237 
1238 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1239 		struct tx_ring		*txr = &tx_que->txr;
1240 		struct i40e_hmc_obj_txq tctx;
1241 		u32			txctl;
1242 
1243 		/* Setup the HMC TX Context  */
1244 		bzero(&tctx, sizeof(tctx));
1245 		tctx.new_context = 1;
1246 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1247 		tctx.qlen = scctx->isc_ntxd[0];
1248 		tctx.fc_ena = 0;	/* Disable FCoE */
1249 		/*
1250 		 * This value needs to be pulled from the VSI that this queue
1251 		 * is assigned to. Index into array is traffic class.
1252 		 */
1253 		tctx.rdylist = vsi->info.qs_handle[0];
1254 		/*
1255 		 * Set these to enable Head Writeback
1256 		 * - Address is last entry in TX ring (reserved for HWB index)
1257 		 * Leave these as 0 for Descriptor Writeback
1258 		 */
1259 		if (vsi->enable_head_writeback) {
1260 			tctx.head_wb_ena = 1;
1261 			tctx.head_wb_addr = txr->tx_paddr +
1262 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1263 		} else {
1264 			tctx.head_wb_ena = 0;
1265 			tctx.head_wb_addr = 0;
1266 		}
1267 		tctx.rdylist_act = 0;
1268 		err = i40e_clear_lan_tx_queue_context(hw, i);
1269 		if (err) {
1270 			device_printf(dev, "Unable to clear TX context\n");
1271 			break;
1272 		}
1273 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1274 		if (err) {
1275 			device_printf(dev, "Unable to set TX context\n");
1276 			break;
1277 		}
1278 		/* Associate the ring with this PF */
1279 		txctl = I40E_QTX_CTL_PF_QUEUE;
1280 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1281 		    I40E_QTX_CTL_PF_INDX_MASK);
1282 		wr32(hw, I40E_QTX_CTL(i), txctl);
1283 		ixl_flush(hw);
1284 
1285 		/* Do ring (re)init */
1286 		ixl_init_tx_ring(vsi, tx_que);
1287 	}
1288 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1289 		struct rx_ring 		*rxr = &rx_que->rxr;
1290 		struct i40e_hmc_obj_rxq rctx;
1291 
1292 		/* Next setup the HMC RX Context  */
1293 		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
1294 
1295 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
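		/*
		 * max_rxmax is the largest frame the hardware can receive by
		 * chaining RX buffers; e.g. (hypothetical values) 2048-byte
		 * buffers with a chain length of 5 would allow 10240 bytes.
		 */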
1296 
1297 		/* Set up an RX context for the HMC */
1298 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1299 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1300 		/* ignore header split for now */
1301 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1302 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1303 		    scctx->isc_max_frame_size : max_rxmax;
1304 		rctx.dtype = 0;
1305 		rctx.dsize = 1;		/* do 32byte descriptors */
1306 		rctx.hsplit_0 = 0;	/* no header split */
1307 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1308 		rctx.qlen = scctx->isc_nrxd[0];
1309 		rctx.tphrdesc_ena = 1;
1310 		rctx.tphwdesc_ena = 1;
1311 		rctx.tphdata_ena = 0;	/* Header Split related */
1312 		rctx.tphhead_ena = 0;	/* Header Split related */
1313 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
1314 		rctx.crcstrip = 1;
1315 		rctx.l2tsel = 1;
1316 		rctx.showiv = 1;	/* Strip inner VLAN header */
1317 		rctx.fc_ena = 0;	/* Disable FCoE */
1318 		rctx.prefena = 1;	/* Prefetch descriptors */
1319 
1320 		err = i40e_clear_lan_rx_queue_context(hw, i);
1321 		if (err) {
1322 			device_printf(dev,
1323 			    "Unable to clear RX context %d\n", i);
1324 			break;
1325 		}
1326 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1327 		if (err) {
1328 			device_printf(dev, "Unable to set RX context %d\n", i);
1329 			break;
1330 		}
1331 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
1332 	}
1333 	return (err);
1334 }
1335 
1336 void
1337 ixl_free_mac_filters(struct ixl_vsi *vsi)
1338 {
1339 	struct ixl_mac_filter *f;
1340 
1341 	while (!SLIST_EMPTY(&vsi->ftl)) {
1342 		f = SLIST_FIRST(&vsi->ftl);
1343 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
1344 		free(f, M_DEVBUF);
1345 	}
1346 }
1347 
1348 /*
1349 ** Provide an update to the queue RX
1350 ** interrupt moderation value.
1351 */
1352 void
1353 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1354 {
1355 	struct ixl_vsi	*vsi = que->vsi;
1356 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1357 	struct i40e_hw	*hw = vsi->hw;
1358 	struct rx_ring	*rxr = &que->rxr;
1359 	u16		rx_itr;
1360 	u16		rx_latency = 0;
1361 	int		rx_bytes;
1362 
1363 	/* Idle, do nothing */
1364 	if (rxr->bytes == 0)
1365 		return;
1366 
1367 	if (pf->dynamic_rx_itr) {
1368 		rx_bytes = rxr->bytes/rxr->itr;
1369 		rx_itr = rxr->itr;
1370 
1371 		/* Adjust latency range */
1372 		switch (rxr->latency) {
1373 		case IXL_LOW_LATENCY:
1374 			if (rx_bytes > 10) {
1375 				rx_latency = IXL_AVE_LATENCY;
1376 				rx_itr = IXL_ITR_20K;
1377 			}
1378 			break;
1379 		case IXL_AVE_LATENCY:
1380 			if (rx_bytes > 20) {
1381 				rx_latency = IXL_BULK_LATENCY;
1382 				rx_itr = IXL_ITR_8K;
1383 			} else if (rx_bytes <= 10) {
1384 				rx_latency = IXL_LOW_LATENCY;
1385 				rx_itr = IXL_ITR_100K;
1386 			}
1387 			break;
1388 		case IXL_BULK_LATENCY:
1389 			if (rx_bytes <= 20) {
1390 				rx_latency = IXL_AVE_LATENCY;
1391 				rx_itr = IXL_ITR_20K;
1392 			}
1393 			break;
1394 		}
1395 
1396 		rxr->latency = rx_latency;
1397 
1398 		if (rx_itr != rxr->itr) {
1399 			/* do an exponential smoothing */
1400 			rx_itr = (10 * rx_itr * rxr->itr) /
1401 			    ((9 * rx_itr) + rxr->itr);
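			/*
			 * The expression above is a weighted harmonic mean
			 * that keeps roughly 90% of the old value; e.g. with
			 * rxr->itr == 100 and a target rx_itr of 50 it gives
			 * (10 * 50 * 100) / ((9 * 50) + 100) = ~90.
			 */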
1402 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
1403 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1404 			    rxr->me), rxr->itr);
1405 		}
1406 	} else { /* We may have toggled to non-dynamic */
1407 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1408 			vsi->rx_itr_setting = pf->rx_itr;
1409 		/* Update the hardware if needed */
1410 		if (rxr->itr != vsi->rx_itr_setting) {
1411 			rxr->itr = vsi->rx_itr_setting;
1412 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1413 			    rxr->me), rxr->itr);
1414 		}
1415 	}
1416 	rxr->bytes = 0;
1417 	rxr->packets = 0;
1418 }
1419 
1420 
1421 /*
1422 ** Provide an update to the queue TX
1423 ** interrupt moderation value.
1424 */
1425 void
1426 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1427 {
1428 	struct ixl_vsi	*vsi = que->vsi;
1429 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1430 	struct i40e_hw	*hw = vsi->hw;
1431 	struct tx_ring	*txr = &que->txr;
1432 	u16		tx_itr;
1433 	u16		tx_latency = 0;
1434 	int		tx_bytes;
1435 
1436 
1437 	/* Idle, do nothing */
1438 	if (txr->bytes == 0)
1439 		return;
1440 
1441 	if (pf->dynamic_tx_itr) {
1442 		tx_bytes = txr->bytes/txr->itr;
1443 		tx_itr = txr->itr;
1444 
1445 		switch (txr->latency) {
1446 		case IXL_LOW_LATENCY:
1447 			if (tx_bytes > 10) {
1448 				tx_latency = IXL_AVE_LATENCY;
1449 				tx_itr = IXL_ITR_20K;
1450 			}
1451 			break;
1452 		case IXL_AVE_LATENCY:
1453 			if (tx_bytes > 20) {
1454 				tx_latency = IXL_BULK_LATENCY;
1455 				tx_itr = IXL_ITR_8K;
1456 			} else if (tx_bytes <= 10) {
1457 				tx_latency = IXL_LOW_LATENCY;
1458 				tx_itr = IXL_ITR_100K;
1459 			}
1460 			break;
1461 		case IXL_BULK_LATENCY:
1462 			if (tx_bytes <= 20) {
1463 				tx_latency = IXL_AVE_LATENCY;
1464 				tx_itr = IXL_ITR_20K;
1465 			}
1466 			break;
1467 		}
1468 
1469 		txr->latency = tx_latency;
1470 
1471 		if (tx_itr != txr->itr) {
1472 			/* do an exponential smoothing */
1473 			tx_itr = (10 * tx_itr * txr->itr) /
1474 			    ((9 * tx_itr) + txr->itr);
1475 			txr->itr = min(tx_itr, IXL_MAX_ITR);
1476 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1477 			    txr->me), txr->itr);
1478 		}
1479 
1480 	} else { /* We may have toggled to non-dynamic */
1481 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1482 			vsi->tx_itr_setting = pf->tx_itr;
1483 		/* Update the hardware if needed */
1484 		if (txr->itr != vsi->tx_itr_setting) {
1485 			txr->itr = vsi->tx_itr_setting;
1486 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1487 			    txr->me), txr->itr);
1488 		}
1489 	}
1490 	txr->bytes = 0;
1491 	txr->packets = 0;
1492 	return;
1493 }
1494 
1495 #ifdef IXL_DEBUG
1496 /**
1497  * ixl_sysctl_qtx_tail_handler
1498  * Retrieves I40E_QTX_TAIL value from hardware
1499  * for a sysctl.
1500  */
1501 int
1502 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1503 {
1504 	struct ixl_tx_queue *tx_que;
1505 	int error;
1506 	u32 val;
1507 
1508 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1509 	if (!tx_que) return 0;
1510 
1511 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1512 	error = sysctl_handle_int(oidp, &val, 0, req);
1513 	if (error || !req->newptr)
1514 		return error;
1515 	return (0);
1516 }
1517 
1518 /**
1519  * ixl_sysctl_qrx_tail_handler
1520  * Retrieves I40E_QRX_TAIL value from hardware
1521  * for a sysctl.
1522  */
1523 int
1524 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1525 {
1526 	struct ixl_rx_queue *rx_que;
1527 	int error;
1528 	u32 val;
1529 
1530 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1531 	if (!rx_que) return 0;
1532 
1533 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1534 	error = sysctl_handle_int(oidp, &val, 0, req);
1535 	if (error || !req->newptr)
1536 		return error;
1537 	return (0);
1538 }
1539 #endif
1540 
1541 /*
1542  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1543  * Writes to the ITR registers immediately.
1544  */
1545 static int
1546 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1547 {
1548 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1549 	device_t dev = pf->dev;
1550 	int error = 0;
1551 	int requested_tx_itr;
1552 
1553 	requested_tx_itr = pf->tx_itr;
1554 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1555 	if ((error) || (req->newptr == NULL))
1556 		return (error);
1557 	if (pf->dynamic_tx_itr) {
1558 		device_printf(dev,
1559 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1560 		return (EINVAL);
1561 	}
1562 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1563 		device_printf(dev,
1564 		    "Invalid TX itr value; value must be between 0 and %d\n",
1565 		        IXL_MAX_ITR);
1566 		return (EINVAL);
1567 	}
1568 
1569 	pf->tx_itr = requested_tx_itr;
1570 	ixl_configure_tx_itr(pf);
1571 
1572 	return (error);
1573 }
1574 
1575 /*
1576  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1577  * Writes to the ITR registers immediately.
1578  */
1579 static int
1580 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1581 {
1582 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1583 	device_t dev = pf->dev;
1584 	int error = 0;
1585 	int requested_rx_itr;
1586 
1587 	requested_rx_itr = pf->rx_itr;
1588 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1589 	if ((error) || (req->newptr == NULL))
1590 		return (error);
1591 	if (pf->dynamic_rx_itr) {
1592 		device_printf(dev,
1593 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1594 		return (EINVAL);
1595 	}
1596 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1597 		device_printf(dev,
1598 		    "Invalid RX itr value; value must be between 0 and %d\n",
1599 		        IXL_MAX_ITR);
1600 		return (EINVAL);
1601 	}
1602 
1603 	pf->rx_itr = requested_rx_itr;
1604 	ixl_configure_rx_itr(pf);
1605 
1606 	return (error);
1607 }
1608 
1609 void
1610 ixl_add_hw_stats(struct ixl_pf *pf)
1611 {
1612 	struct ixl_vsi *vsi = &pf->vsi;
1613 	device_t dev = iflib_get_dev(vsi->ctx);
1614 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1615 
1616 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1617 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1618 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1619 
1620 	/* Driver statistics */
1621 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1622 			CTLFLAG_RD, &pf->admin_irq,
1623 			"Admin Queue IRQs received");
1624 
1625 	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1626 
1627 	ixl_add_queues_sysctls(dev, vsi);
1628 
1629 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1630 }
1631 
1632 void
1633 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1634 	struct sysctl_oid_list *child,
1635 	struct i40e_hw_port_stats *stats)
1636 {
1637 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
1638 	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1639 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1640 
1641 	struct i40e_eth_stats *eth_stats = &stats->eth;
1642 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1643 
1644 	struct ixl_sysctl_info ctls[] =
1645 	{
1646 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1647 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1648 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1649 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1650 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1651 		/* Packet Reception Stats */
1652 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1653 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1654 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1655 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1656 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1657 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1658 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1659 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1660 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1661 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1662 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1663 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1664 		/* Packet Transmission Stats */
1665 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1666 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1667 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1668 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1669 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1670 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1671 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1672 		/* Flow control */
1673 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1674 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1675 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1676 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1677 		/* End */
1678 		{0,0,0}
1679 	};
1680 
1681 	struct ixl_sysctl_info *entry = ctls;
1682 	while (entry->stat != 0)
1683 	{
1684 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1685 				CTLFLAG_RD, entry->stat,
1686 				entry->description);
1687 		entry++;
1688 	}
1689 }
1690 
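/*
 * Program the RSS hash key for the PF.
 *
 * Descriptive note (added commentary): X722 devices take the key via the
 * i40e_aq_set_rss_key() admin queue command, while other MACs are
 * programmed by writing the I40E_PFQF_HKEY registers directly. The
 * 52-byte bcopy below matches the IXL_RSS_KEY_SIZE_REG (13) u32 words of
 * seed material.
 */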
1691 void
1692 ixl_set_rss_key(struct ixl_pf *pf)
1693 {
1694 	struct i40e_hw *hw = &pf->hw;
1695 	struct ixl_vsi *vsi = &pf->vsi;
1696 	device_t	dev = pf->dev;
1697 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1698 	enum i40e_status_code status;
1699 
1700 #ifdef RSS
1701 	/* Fetch the configured RSS key */
1702 	rss_getkey((uint8_t *)&rss_seed);
1703 #else
1704 	ixl_get_default_rss_key(rss_seed);
1705 #endif
1706 	/* Fill out hash function seed */
1707 	if (hw->mac.type == I40E_MAC_X722) {
1708 		struct i40e_aqc_get_set_rss_key_data key_data;
1709 		bcopy(rss_seed, &key_data, 52);
1710 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1711 		if (status)
1712 			device_printf(dev,
1713 			    "i40e_aq_set_rss_key status %s, error %s\n",
1714 			    i40e_stat_str(hw, status),
1715 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1716 	} else {
1717 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1718 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1719 	}
1720 }
1721 
1722 /*
1723  * Configure enabled PCTYPES for RSS.
1724  */
1725 void
1726 ixl_set_rss_pctypes(struct ixl_pf *pf)
1727 {
1728 	struct i40e_hw *hw = &pf->hw;
1729 	u64		set_hena = 0, hena;
1730 
1731 #ifdef RSS
1732 	u32		rss_hash_config;
1733 
1734 	rss_hash_config = rss_gethashconfig();
1735 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1736 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1737 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1738 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1739 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1740 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1741 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1742 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1743 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1744 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1745 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1746 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1747 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1748 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1749 #else
1750 	if (hw->mac.type == I40E_MAC_X722)
1751 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1752 	else
1753 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1754 #endif
1755 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1756 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1757 	hena |= set_hena;
1758 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1759 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1760 
1761 }
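/*
 * Worked example (added commentary): HENA is a 64-bit bitmask split
 * across the two 32-bit I40E_PFQF_HENA registers, one bit per packet
 * classifier type (PCTYPE). Enabling IPv4/TCP hashing, for instance,
 * amounts to:
 *
 *	hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 *
 * with the low 32 bits written to HENA(0) and the high 32 bits to
 * HENA(1). Note the routine above only ORs new bits in; it never clears
 * PCTYPEs that were already enabled in hardware.
 */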
1762 
1763 void
1764 ixl_set_rss_hlut(struct ixl_pf *pf)
1765 {
1766 	struct i40e_hw	*hw = &pf->hw;
1767 	struct ixl_vsi *vsi = &pf->vsi;
1768 	device_t	dev = iflib_get_dev(vsi->ctx);
1769 	int		i, que_id;
1770 	int		lut_entry_width;
1771 	u32		lut = 0;
1772 	enum i40e_status_code status;
1773 
1774 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1775 
1776 	/* Populate the LUT with the max number of queues, in round-robin fashion */
1777 	u8 hlut_buf[512];
1778 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1779 #ifdef RSS
1780 		/*
1781 		 * Fetch the RSS bucket id for the given indirection entry.
1782 		 * Cap it at the number of configured buckets (which is
1783 		 * num_queues.)
1784 		 */
1785 		que_id = rss_get_indirection_to_bucket(i);
1786 		que_id = que_id % vsi->num_rx_queues;
1787 #else
1788 		que_id = i % vsi->num_rx_queues;
1789 #endif
1790 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1791 		hlut_buf[i] = lut;
1792 	}
1793 
1794 	if (hw->mac.type == I40E_MAC_X722) {
1795 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1796 		if (status)
1797 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1798 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1799 	} else {
1800 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1801 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1802 		ixl_flush(hw);
1803 	}
1804 }
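/*
 * Worked example (added commentary): in the non-RSS case, with a
 * 512-entry RSS table and 8 RX queues, the loop above fills hlut_buf[]
 * with the round-robin pattern 0, 1, ..., 7, 0, 1, ..., each entry
 * masked to rss_table_entry_width bits before being handed to firmware
 * or written to the I40E_PFQF_HLUT registers.
 */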
1805 
1806 /*
1807 ** Setup the PF's RSS parameters.
1808 */
1809 void
1810 ixl_config_rss(struct ixl_pf *pf)
1811 {
1812 	ixl_set_rss_key(pf);
1813 	ixl_set_rss_pctypes(pf);
1814 	ixl_set_rss_hlut(pf);
1815 }
1816 
1817 /*
1818 ** This routine updates the VLAN filters; called by init,
1819 ** it scans the filter table and then updates the hardware
1820 ** after a soft reset.
1821 */
1822 void
1823 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1824 {
1825 	struct ixl_mac_filter	*f;
1826 	int			cnt = 0, flags;
1827 
1828 	if (vsi->num_vlans == 0)
1829 		return;
1830 	/*
1831 	** Scan the filter list for vlan entries,
1832 	** mark them for addition and then call
1833 	** for the AQ update.
1834 	*/
1835 	SLIST_FOREACH(f, &vsi->ftl, next) {
1836 		if (f->flags & IXL_FILTER_VLAN) {
1837 			f->flags |=
1838 			    (IXL_FILTER_ADD |
1839 			    IXL_FILTER_USED);
1840 			cnt++;
1841 		}
1842 	}
1843 	if (cnt == 0) {
1844 		printf("setup vlan: no filters found!\n");
1845 		return;
1846 	}
1847 	flags = IXL_FILTER_VLAN;
1848 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1849 	ixl_add_hw_filters(vsi, flags, cnt);
1850 }
1851 
1852 /*
1853  * In some firmware versions a default MAC/VLAN filter is
1854  * configured which interferes with the filters managed by
1855  * the driver. Make sure it's removed.
1856  */
1857 void
1858 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1859 {
1860 	struct i40e_aqc_remove_macvlan_element_data e;
1861 
1862 	bzero(&e, sizeof(e));
1863 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1864 	e.vlan_tag = 0;
1865 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1866 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1867 
1868 	bzero(&e, sizeof(e));
1869 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1870 	e.vlan_tag = 0;
1871 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1872 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1873 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1874 }
1875 
1876 /*
1877 ** Initialize filter list and add filters that the hardware
1878 ** needs to know about.
1879 **
1880 ** Requires VSI's filter list & seid to be set before calling.
1881 */
1882 void
1883 ixl_init_filters(struct ixl_vsi *vsi)
1884 {
1885 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1886 
1887 	/* Initialize mac filter list for VSI */
1888 	SLIST_INIT(&vsi->ftl);
1889 
1890 	/* Receive broadcast Ethernet frames */
1891 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1892 
1893 	ixl_del_default_hw_filters(vsi);
1894 
1895 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1896 	/*
1897 	 * Prevent Tx flow control frames from being sent out by
1898 	 * non-firmware transmitters.
1899 	 * This affects every VSI in the PF.
1900 	 */
1901 	if (pf->enable_tx_fc_filter)
1902 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1903 }
1904 
1905 /*
1906 ** This routine adds multicast filters.
1907 */
1908 void
1909 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1910 {
1911 	struct ixl_mac_filter *f;
1912 
1913 	/* Does one already exist? */
1914 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1915 	if (f != NULL)
1916 		return;
1917 
1918 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1919 	if (f != NULL)
1920 		f->flags |= IXL_FILTER_MC;
1921 	else
1922 		printf("WARNING: no filter available!!\n");
1923 }
1924 
1925 void
1926 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1927 {
1928 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1929 }
1930 
1931 /*
1932  * This routine adds a MAC/VLAN filter to the software filter
1933  * list, then adds that new filter to the HW if it doesn't already
1934  * exist in the SW filter list.
1935  */
1936 void
1937 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1938 {
1939 	struct ixl_mac_filter	*f, *tmp;
1940 	struct ixl_pf		*pf;
1941 	device_t		dev;
1942 
1943 	DEBUGOUT("ixl_add_filter: begin");
1944 
1945 	pf = vsi->back;
1946 	dev = pf->dev;
1947 
1948 	/* Does one already exist? */
1949 	f = ixl_find_filter(vsi, macaddr, vlan);
1950 	if (f != NULL)
1951 		return;
1952 	/*
1953 	** If this is the first VLAN being registered, we need
1954 	** to remove the ANY filter that indicates we are not
1955 	** in a VLAN, and replace it with a VLAN-0 filter.
1956 	*/
1957 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1958 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 		if (tmp != NULL) {
1960 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1961 			ixl_add_filter(vsi, macaddr, 0);
1962 		}
1963 	}
1964 
1965 	f = ixl_new_filter(vsi, macaddr, vlan);
1966 	if (f == NULL) {
1967 		device_printf(dev, "WARNING: no filter available!!\n");
1968 		return;
1969 	}
1970 	if (f->vlan != IXL_VLAN_ANY)
1971 		f->flags |= IXL_FILTER_VLAN;
1972 	else
1973 		vsi->num_macs++;
1974 
1975 	f->flags |= IXL_FILTER_USED;
1976 	ixl_add_hw_filters(vsi, f->flags, 1);
1977 }
1978 
1979 void
1980 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1981 {
1982 	struct ixl_mac_filter *f;
1983 
1984 	f = ixl_find_filter(vsi, macaddr, vlan);
1985 	if (f == NULL)
1986 		return;
1987 
1988 	f->flags |= IXL_FILTER_DEL;
1989 	ixl_del_hw_filters(vsi, 1);
1990 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1991 		vsi->num_macs--;
1992 
1993 	/* Check if this is the last vlan removal */
1994 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1995 		/* Switch back to a non-vlan filter */
1996 		ixl_del_filter(vsi, macaddr, 0);
1997 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1998 	}
1999 	return;
2000 }
2001 
2002 /*
2003 ** Find the filter with both matching mac addr and vlan id
2004 */
2005 struct ixl_mac_filter *
2006 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2007 {
2008 	struct ixl_mac_filter	*f;
2009 
2010 	SLIST_FOREACH(f, &vsi->ftl, next) {
2011 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2012 		    && (f->vlan == vlan)) {
2013 			return (f);
2014 		}
2015 	}
2016 
2017 	return (NULL);
2018 }
2019 
2020 /*
2021 ** This routine takes additions to the VSI filter
2022 ** table and issues an Admin Queue command to create
2023 ** the filters in the hardware.
2024 */
2025 void
2026 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2027 {
2028 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2029 	struct ixl_mac_filter	*f;
2030 	struct ixl_pf		*pf;
2031 	struct i40e_hw		*hw;
2032 	device_t		dev;
2033 	enum i40e_status_code	status;
2034 	int			j = 0;
2035 
2036 	pf = vsi->back;
2037 	dev = vsi->dev;
2038 	hw = &pf->hw;
2039 
2040 	if (cnt < 1) {
2041 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
2042 		return;
2043 	}
2044 
2045 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2046 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2047 	if (a == NULL) {
2048 		device_printf(dev, "add_hw_filters failed to get memory\n");
2049 		return;
2050 	}
2051 
2052 	/*
2053 	** Scan the filter list; each time we find a match we
2054 	** add it to the admin queue array and clear the
2055 	** add bit.
2056 	*/
2057 	SLIST_FOREACH(f, &vsi->ftl, next) {
2058 		if ((f->flags & flags) == flags) {
2059 			b = &a[j]; // a pox on fvl long names :)
2060 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2061 			if (f->vlan == IXL_VLAN_ANY) {
2062 				b->vlan_tag = 0;
2063 				b->flags = CPU_TO_LE16(
2064 				    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
2065 			} else {
2066 				b->vlan_tag = CPU_TO_LE16(f->vlan);
2067 				b->flags = 0;
2068 			}
2069 			b->flags |= CPU_TO_LE16(
2070 			    I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2071 			f->flags &= ~IXL_FILTER_ADD;
2072 			j++;
2073 
2074 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2075 			    MAC_FORMAT_ARGS(f->macaddr));
2076 		}
2077 		if (j == cnt)
2078 			break;
2079 	}
2080 	if (j > 0) {
2081 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2082 		if (status)
2083 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2084 			    "error %s\n", i40e_stat_str(hw, status),
2085 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2086 		else
2087 			vsi->hw_filters_add += j;
2088 	}
2089 	free(a, M_DEVBUF);
2090 	return;
2091 }
2092 
2093 /*
2094 ** This routine takes removals in the VSI filter
2095 ** table and issues an Admin Queue command to delete
2096 ** the filters in the hardware.
2097 */
2098 void
2099 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2100 {
2101 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2102 	struct ixl_pf		*pf;
2103 	struct i40e_hw		*hw;
2104 	device_t		dev;
2105 	struct ixl_mac_filter	*f, *f_temp;
2106 	enum i40e_status_code	status;
2107 	int			j = 0;
2108 
2109 	pf = vsi->back;
2110 	hw = &pf->hw;
2111 	dev = vsi->dev;
2112 
2113 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2114 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2115 	if (d == NULL) {
2116 		device_printf(dev, "%s: failed to get memory\n", __func__);
2117 		return;
2118 	}
2119 
2120 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2121 		if (f->flags & IXL_FILTER_DEL) {
2122 			e = &d[j]; // a pox on fvl long names :)
2123 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2124 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2125 			if (f->vlan == IXL_VLAN_ANY) {
2126 				e->vlan_tag = 0;
2127 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2128 			} else {
2129 				e->vlan_tag = f->vlan;
2130 			}
2131 
2132 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2133 			    MAC_FORMAT_ARGS(f->macaddr));
2134 
2135 			/* delete entry from vsi list */
2136 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2137 			free(f, M_DEVBUF);
2138 			j++;
2139 		}
2140 		if (j == cnt)
2141 			break;
2142 	}
2143 	if (j > 0) {
2144 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2145 		if (status) {
2146 			int sc = 0;
2147 			for (int i = 0; i < j; i++)
2148 				sc += (!d[i].error_code);
2149 			vsi->hw_filters_del += sc;
2150 			device_printf(dev,
2151 			    "Failed to remove %d/%d filters, error %s\n",
2152 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2153 		} else
2154 			vsi->hw_filters_del += j;
2155 	}
2156 	free(d, M_DEVBUF);
2157 	return;
2158 }
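/*
 * Note (added commentary): on a partial AQ failure above, each element's
 * error_code is inspected individually, so hw_filters_del is still
 * credited with the entries the firmware did remove even when
 * i40e_aq_remove_macvlan() returns an overall error.
 */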
2159 
2160 int
2161 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2162 {
2163 	struct i40e_hw	*hw = &pf->hw;
2164 	int		error = 0;
2165 	u32		reg;
2166 	u16		pf_qidx;
2167 
2168 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2169 
2170 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2171 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2172 	    pf_qidx, vsi_qidx);
2173 
2174 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2175 
2176 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2177 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2178 	    I40E_QTX_ENA_QENA_STAT_MASK;
2179 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2180 	/* Verify the enable took */
2181 	for (int j = 0; j < 10; j++) {
2182 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2183 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2184 			break;
2185 		i40e_usec_delay(10);
2186 	}
2187 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2188 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2189 		    pf_qidx);
2190 		error = ETIMEDOUT;
2191 	}
2192 
2193 	return (error);
2194 }
2195 
2196 int
2197 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2198 {
2199 	struct i40e_hw	*hw = &pf->hw;
2200 	int		error = 0;
2201 	u32		reg;
2202 	u16		pf_qidx;
2203 
2204 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2205 
2206 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2207 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2208 	    pf_qidx, vsi_qidx);
2209 
2210 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2211 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2212 	    I40E_QRX_ENA_QENA_STAT_MASK;
2213 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2214 	/* Verify the enable took */
2215 	for (int j = 0; j < 10; j++) {
2216 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2217 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2218 			break;
2219 		i40e_usec_delay(10);
2220 	}
2221 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2222 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2223 		    pf_qidx);
2224 		error = ETIMEDOUT;
2225 	}
2226 
2227 	return (error);
2228 }
2229 
2230 int
2231 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2232 {
2233 	int error = 0;
2234 
2235 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2236 	/* Called function already prints error message */
2237 	if (error)
2238 		return (error);
2239 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2240 	return (error);
2241 }
2242 
2243 /* For PF VSI only */
2244 int
2245 ixl_enable_rings(struct ixl_vsi *vsi)
2246 {
2247 	struct ixl_pf	*pf = vsi->back;
2248 	int		error = 0;
2249 
2250 	for (int i = 0; i < vsi->num_tx_queues; i++)
2251 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2252 
2253 	for (int i = 0; i < vsi->num_rx_queues; i++)
2254 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2255 
2256 	return (error);
2257 }
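/*
 * Note (added commentary): because "error" is overwritten on each
 * iteration above, only the status of the last ring processed is
 * returned, and an earlier timeout can be masked by a later success;
 * the per-ring helpers still print their own error messages. The same
 * applies to ixl_disable_rings() below.
 */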
2258 
2259 /*
2260  * Returns an error if the ring does not disable within the timeout.
2261  */
2262 int
2263 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2264 {
2265 	struct i40e_hw	*hw = &pf->hw;
2266 	int		error = 0;
2267 	u32		reg;
2268 	u16		pf_qidx;
2269 
2270 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2271 
2272 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2273 	i40e_usec_delay(500);
2274 
2275 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2276 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2277 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2278 	/* Verify the disable took */
2279 	for (int j = 0; j < 10; j++) {
2280 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2281 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2282 			break;
2283 		i40e_msec_delay(10);
2284 	}
2285 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2286 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2287 		    pf_qidx);
2288 		error = ETIMEDOUT;
2289 	}
2290 
2291 	return (error);
2292 }
2293 
2294 /*
2295  * Returns an error if the ring does not disable within the timeout.
2296  */
2297 int
2298 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2299 {
2300 	struct i40e_hw	*hw = &pf->hw;
2301 	int		error = 0;
2302 	u32		reg;
2303 	u16		pf_qidx;
2304 
2305 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2306 
2307 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2308 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2309 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2310 	/* Verify the disable took */
2311 	for (int j = 0; j < 10; j++) {
2312 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2313 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2314 			break;
2315 		i40e_msec_delay(10);
2316 	}
2317 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2318 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2319 		    pf_qidx);
2320 		error = ETIMEDOUT;
2321 	}
2322 
2323 	return (error);
2324 }
2325 
2326 int
2327 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2328 {
2329 	int error = 0;
2330 
2331 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2332 	/* Called function already prints error message */
2333 	if (error)
2334 		return (error);
2335 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2336 	return (error);
2337 }
2338 
2339 int
2340 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2341 {
2342 	int error = 0;
2343 
2344 	for (int i = 0; i < vsi->num_tx_queues; i++)
2345 		error = ixl_disable_tx_ring(pf, qtag, i);
2346 
2347 	for (int i = 0; i < vsi->num_rx_queues; i++)
2348 		error = ixl_disable_rx_ring(pf, qtag, i);
2349 
2350 	return (error);
2351 }
2352 
2353 static void
2354 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2355 {
2356 	struct i40e_hw *hw = &pf->hw;
2357 	device_t dev = pf->dev;
2358 	struct ixl_vf *vf;
2359 	bool mdd_detected = false;
2360 	bool pf_mdd_detected = false;
2361 	bool vf_mdd_detected = false;
2362 	u16 vf_num, queue;
2363 	u8 pf_num, event;
2364 	u8 pf_mdet_num, vp_mdet_num;
2365 	u32 reg;
2366 
2367 	/* find what triggered the MDD event */
2368 	reg = rd32(hw, I40E_GL_MDET_TX);
2369 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2370 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2371 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2372 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2373 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2374 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2375 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2376 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2377 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2378 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2379 		mdd_detected = true;
2380 	}
2381 
2382 	if (!mdd_detected)
2383 		return;
2384 
2385 	reg = rd32(hw, I40E_PF_MDET_TX);
2386 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2387 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2388 		pf_mdet_num = hw->pf_id;
2389 		pf_mdd_detected = true;
2390 	}
2391 
2392 	/* Check if MDD was caused by a VF */
2393 	for (int i = 0; i < pf->num_vfs; i++) {
2394 		vf = &(pf->vfs[i]);
2395 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2396 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2397 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2398 			vp_mdet_num = i;
2399 			vf->num_mdd_events++;
2400 			vf_mdd_detected = true;
2401 		}
2402 	}
2403 
2404 	/* Print out an error message */
2405 	if (vf_mdd_detected && pf_mdd_detected)
2406 		device_printf(dev,
2407 		    "Malicious Driver Detection event %d"
2408 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2409 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2410 	else if (vf_mdd_detected && !pf_mdd_detected)
2411 		device_printf(dev,
2412 		    "Malicious Driver Detection event %d"
2413 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2414 		    event, queue, pf_num, vf_num, vp_mdet_num);
2415 	else if (!vf_mdd_detected && pf_mdd_detected)
2416 		device_printf(dev,
2417 		    "Malicious Driver Detection event %d"
2418 		    " on TX queue %d, pf number %d (PF-%d)\n",
2419 		    event, queue, pf_num, pf_mdet_num);
2420 	/* Theoretically shouldn't happen */
2421 	else
2422 		device_printf(dev,
2423 		    "TX Malicious Driver Detection event (unknown)\n");
2424 }
2425 
2426 static void
2427 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2428 {
2429 	struct i40e_hw *hw = &pf->hw;
2430 	device_t dev = pf->dev;
2431 	struct ixl_vf *vf;
2432 	bool mdd_detected = false;
2433 	bool pf_mdd_detected = false;
2434 	bool vf_mdd_detected = false;
2435 	u16 queue;
2436 	u8 pf_num, event;
2437 	u8 pf_mdet_num, vp_mdet_num;
2438 	u32 reg;
2439 
2440 	/*
2441 	 * GL_MDET_RX doesn't contain VF number information, unlike
2442 	 * GL_MDET_TX.
2443 	 */
2444 	reg = rd32(hw, I40E_GL_MDET_RX);
2445 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2446 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2447 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2448 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2449 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2450 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2451 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2452 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2453 		mdd_detected = true;
2454 	}
2455 
2456 	if (!mdd_detected)
2457 		return;
2458 
2459 	reg = rd32(hw, I40E_PF_MDET_RX);
2460 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2461 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2462 		pf_mdet_num = hw->pf_id;
2463 		pf_mdd_detected = true;
2464 	}
2465 
2466 	/* Check if MDD was caused by a VF */
2467 	for (int i = 0; i < pf->num_vfs; i++) {
2468 		vf = &(pf->vfs[i]);
2469 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2470 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2471 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2472 			vp_mdet_num = i;
2473 			vf->num_mdd_events++;
2474 			vf_mdd_detected = true;
2475 		}
2476 	}
2477 
2478 	/* Print out an error message */
2479 	if (vf_mdd_detected && pf_mdd_detected)
2480 		device_printf(dev,
2481 		    "Malicious Driver Detection event %d"
2482 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2483 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2484 	else if (vf_mdd_detected && !pf_mdd_detected)
2485 		device_printf(dev,
2486 		    "Malicious Driver Detection event %d"
2487 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2488 		    event, queue, pf_num, vp_mdet_num);
2489 	else if (!vf_mdd_detected && pf_mdd_detected)
2490 		device_printf(dev,
2491 		    "Malicious Driver Detection event %d"
2492 		    " on RX queue %d, pf number %d (PF-%d)\n",
2493 		    event, queue, pf_num, pf_mdet_num);
2494 	/* Theoretically shouldn't happen */
2495 	else
2496 		device_printf(dev,
2497 		    "RX Malicious Driver Detection event (unknown)\n");
2498 }
2499 
2500 /**
2501  * ixl_handle_mdd_event
2502  *
2503  * Called from the interrupt handler to identify possibly malicious VFs
2504  * (it also detects events from the PF)
2505  **/
2506 void
2507 ixl_handle_mdd_event(struct ixl_pf *pf)
2508 {
2509 	struct i40e_hw *hw = &pf->hw;
2510 	u32 reg;
2511 
2512 	/*
2513 	 * Handle both TX/RX because it's possible they could
2514 	 * both trigger in the same interrupt.
2515 	 */
2516 	ixl_handle_tx_mdd_event(pf);
2517 	ixl_handle_rx_mdd_event(pf);
2518 
2519 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2520 
2521 	/* re-enable mdd interrupt cause */
2522 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2523 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2524 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2525 	ixl_flush(hw);
2526 }
2527 
2528 void
2529 ixl_enable_intr(struct ixl_vsi *vsi)
2530 {
2531 	struct i40e_hw		*hw = vsi->hw;
2532 	struct ixl_rx_queue	*que = vsi->rx_queues;
2533 
2534 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2535 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2536 			ixl_enable_queue(hw, que->rxr.me);
2537 	} else
2538 		ixl_enable_intr0(hw);
2539 }
2540 
2541 void
2542 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2543 {
2544 	struct i40e_hw		*hw = vsi->hw;
2545 	struct ixl_rx_queue	*que = vsi->rx_queues;
2546 
2547 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2548 		ixl_disable_queue(hw, que->rxr.me);
2549 }
2550 
2551 void
2552 ixl_enable_intr0(struct i40e_hw *hw)
2553 {
2554 	u32		reg;
2555 
2556 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2557 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2558 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2559 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2560 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2561 }
2562 
2563 void
2564 ixl_disable_intr0(struct i40e_hw *hw)
2565 {
2566 	u32		reg;
2567 
2568 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2569 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2570 	ixl_flush(hw);
2571 }
2572 
2573 void
2574 ixl_enable_queue(struct i40e_hw *hw, int id)
2575 {
2576 	u32		reg;
2577 
2578 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2579 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2580 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2581 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2582 }
2583 
2584 void
2585 ixl_disable_queue(struct i40e_hw *hw, int id)
2586 {
2587 	u32		reg;
2588 
2589 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2590 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2591 }
2592 
2593 void
2594 ixl_update_stats_counters(struct ixl_pf *pf)
2595 {
2596 	struct i40e_hw	*hw = &pf->hw;
2597 	struct ixl_vsi	*vsi = &pf->vsi;
2598 	struct ixl_vf	*vf;
2599 	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2600 
2601 	struct i40e_hw_port_stats *nsd = &pf->stats;
2602 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2603 
2604 	/* Update hw stats */
2605 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2606 			   pf->stat_offsets_loaded,
2607 			   &osd->crc_errors, &nsd->crc_errors);
2608 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2609 			   pf->stat_offsets_loaded,
2610 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2611 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2612 			   I40E_GLPRT_GORCL(hw->port),
2613 			   pf->stat_offsets_loaded,
2614 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2615 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2616 			   I40E_GLPRT_GOTCL(hw->port),
2617 			   pf->stat_offsets_loaded,
2618 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2619 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2620 			   pf->stat_offsets_loaded,
2621 			   &osd->eth.rx_discards,
2622 			   &nsd->eth.rx_discards);
2623 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2624 			   I40E_GLPRT_UPRCL(hw->port),
2625 			   pf->stat_offsets_loaded,
2626 			   &osd->eth.rx_unicast,
2627 			   &nsd->eth.rx_unicast);
2628 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2629 			   I40E_GLPRT_UPTCL(hw->port),
2630 			   pf->stat_offsets_loaded,
2631 			   &osd->eth.tx_unicast,
2632 			   &nsd->eth.tx_unicast);
2633 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2634 			   I40E_GLPRT_MPRCL(hw->port),
2635 			   pf->stat_offsets_loaded,
2636 			   &osd->eth.rx_multicast,
2637 			   &nsd->eth.rx_multicast);
2638 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2639 			   I40E_GLPRT_MPTCL(hw->port),
2640 			   pf->stat_offsets_loaded,
2641 			   &osd->eth.tx_multicast,
2642 			   &nsd->eth.tx_multicast);
2643 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2644 			   I40E_GLPRT_BPRCL(hw->port),
2645 			   pf->stat_offsets_loaded,
2646 			   &osd->eth.rx_broadcast,
2647 			   &nsd->eth.rx_broadcast);
2648 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2649 			   I40E_GLPRT_BPTCL(hw->port),
2650 			   pf->stat_offsets_loaded,
2651 			   &osd->eth.tx_broadcast,
2652 			   &nsd->eth.tx_broadcast);
2653 
2654 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2655 			   pf->stat_offsets_loaded,
2656 			   &osd->tx_dropped_link_down,
2657 			   &nsd->tx_dropped_link_down);
2658 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2659 			   pf->stat_offsets_loaded,
2660 			   &osd->mac_local_faults,
2661 			   &nsd->mac_local_faults);
2662 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2663 			   pf->stat_offsets_loaded,
2664 			   &osd->mac_remote_faults,
2665 			   &nsd->mac_remote_faults);
2666 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2667 			   pf->stat_offsets_loaded,
2668 			   &osd->rx_length_errors,
2669 			   &nsd->rx_length_errors);
2670 
2671 	/* Flow control (LFC) stats */
2672 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2673 			   pf->stat_offsets_loaded,
2674 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2675 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2676 			   pf->stat_offsets_loaded,
2677 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2678 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2679 			   pf->stat_offsets_loaded,
2680 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2681 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2682 			   pf->stat_offsets_loaded,
2683 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2684 
2685 	/*
2686 	 * For watchdog management we need to know if we have been paused
2687 	 * during the last interval, so capture that here.
2688 	 */
2689 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2690 		vsi->shared->isc_pause_frames = 1;
2691 
2692 	/* Packet size stats rx */
2693 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2694 			   I40E_GLPRT_PRC64L(hw->port),
2695 			   pf->stat_offsets_loaded,
2696 			   &osd->rx_size_64, &nsd->rx_size_64);
2697 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2698 			   I40E_GLPRT_PRC127L(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->rx_size_127, &nsd->rx_size_127);
2701 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2702 			   I40E_GLPRT_PRC255L(hw->port),
2703 			   pf->stat_offsets_loaded,
2704 			   &osd->rx_size_255, &nsd->rx_size_255);
2705 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2706 			   I40E_GLPRT_PRC511L(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->rx_size_511, &nsd->rx_size_511);
2709 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2710 			   I40E_GLPRT_PRC1023L(hw->port),
2711 			   pf->stat_offsets_loaded,
2712 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2713 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2714 			   I40E_GLPRT_PRC1522L(hw->port),
2715 			   pf->stat_offsets_loaded,
2716 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2717 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2718 			   I40E_GLPRT_PRC9522L(hw->port),
2719 			   pf->stat_offsets_loaded,
2720 			   &osd->rx_size_big, &nsd->rx_size_big);
2721 
2722 	/* Packet size stats tx */
2723 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2724 			   I40E_GLPRT_PTC64L(hw->port),
2725 			   pf->stat_offsets_loaded,
2726 			   &osd->tx_size_64, &nsd->tx_size_64);
2727 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2728 			   I40E_GLPRT_PTC127L(hw->port),
2729 			   pf->stat_offsets_loaded,
2730 			   &osd->tx_size_127, &nsd->tx_size_127);
2731 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2732 			   I40E_GLPRT_PTC255L(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->tx_size_255, &nsd->tx_size_255);
2735 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2736 			   I40E_GLPRT_PTC511L(hw->port),
2737 			   pf->stat_offsets_loaded,
2738 			   &osd->tx_size_511, &nsd->tx_size_511);
2739 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2740 			   I40E_GLPRT_PTC1023L(hw->port),
2741 			   pf->stat_offsets_loaded,
2742 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2743 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2744 			   I40E_GLPRT_PTC1522L(hw->port),
2745 			   pf->stat_offsets_loaded,
2746 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2747 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2748 			   I40E_GLPRT_PTC9522L(hw->port),
2749 			   pf->stat_offsets_loaded,
2750 			   &osd->tx_size_big, &nsd->tx_size_big);
2751 
2752 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2753 			   pf->stat_offsets_loaded,
2754 			   &osd->rx_undersize, &nsd->rx_undersize);
2755 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2756 			   pf->stat_offsets_loaded,
2757 			   &osd->rx_fragments, &nsd->rx_fragments);
2758 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2759 			   pf->stat_offsets_loaded,
2760 			   &osd->rx_oversize, &nsd->rx_oversize);
2761 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2762 			   pf->stat_offsets_loaded,
2763 			   &osd->rx_jabber, &nsd->rx_jabber);
2764 	pf->stat_offsets_loaded = true;
2765 	/* End hw stats */
2766 
2767 	/* Update vsi stats */
2768 	ixl_update_vsi_stats(vsi);
2769 
2770 	for (int i = 0; i < pf->num_vfs; i++) {
2771 		vf = &pf->vfs[i];
2772 		if (vf->vf_flags & VF_FLAG_ENABLED)
2773 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2774 	}
2775 }
2776 
2777 int
2778 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2779 {
2780 	struct i40e_hw *hw = &pf->hw;
2781 	device_t dev = pf->dev;
2782 	int error = 0;
2783 
2784 	error = i40e_shutdown_lan_hmc(hw);
2785 	if (error)
2786 		device_printf(dev,
2787 		    "Shutdown LAN HMC failed with code %d\n", error);
2788 
2789 	ixl_disable_intr0(hw);
2790 
2791 	error = i40e_shutdown_adminq(hw);
2792 	if (error)
2793 		device_printf(dev,
2794 		    "Shutdown Admin queue failed with code %d\n", error);
2795 
2796 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2797 	return (error);
2798 }
2799 
2800 int
2801 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2802 {
2803 	struct i40e_hw *hw = &pf->hw;
2804 	struct ixl_vsi *vsi = &pf->vsi;
2805 	device_t dev = pf->dev;
2806 	int error = 0;
2807 
2808 	device_printf(dev, "Rebuilding driver state...\n");
2809 
2810 	error = i40e_pf_reset(hw);
2811 	if (error) {
2812 		device_printf(dev, "PF reset failure %s\n",
2813 		    i40e_stat_str(hw, error));
2814 		goto ixl_rebuild_hw_structs_after_reset_err;
2815 	}
2816 
2817 	/* Setup */
2818 	error = i40e_init_adminq(hw);
2819 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2820 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2821 		    error);
2822 		goto ixl_rebuild_hw_structs_after_reset_err;
2823 	}
2824 
2825 	i40e_clear_pxe_mode(hw);
2826 
2827 	error = ixl_get_hw_capabilities(pf);
2828 	if (error) {
2829 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2830 		goto ixl_rebuild_hw_structs_after_reset_err;
2831 	}
2832 
2833 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2834 	    hw->func_caps.num_rx_qp, 0, 0);
2835 	if (error) {
2836 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2837 		goto ixl_rebuild_hw_structs_after_reset_err;
2838 	}
2839 
2840 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2841 	if (error) {
2842 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2843 		goto ixl_rebuild_hw_structs_after_reset_err;
2844 	}
2845 
2846 	/* reserve a contiguous allocation for the PF's VSI */
2847 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2848 	if (error) {
2849 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2850 		    error);
2851 		/* TODO: error handling */
2852 	}
2853 
2854 	error = ixl_switch_config(pf);
2855 	if (error) {
2856 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2857 		     error);
2858 		error = EIO;
2859 		goto ixl_rebuild_hw_structs_after_reset_err;
2860 	}
2861 
2862 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2863 	    NULL);
2864 	if (error) {
2865 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2866 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2867 		error = EIO;
2868 		goto ixl_rebuild_hw_structs_after_reset_err;
2869 	}
2870 
2871 	u8 set_fc_err_mask;
2872 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2873 	if (error) {
2874 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2875 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2876 		error = EIO;
2877 		goto ixl_rebuild_hw_structs_after_reset_err;
2878 	}
2879 
2880 	/* Remove default filters reinstalled by FW on reset */
2881 	ixl_del_default_hw_filters(vsi);
2882 
2883 	/* Determine link state */
2884 	if (ixl_attach_get_link_status(pf)) {
2885 		error = EINVAL;
2886 		/* TODO: error handling */
2887 	}
2888 
2889 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2890 	ixl_get_fw_lldp_status(pf);
2891 
2892 	/* Keep admin queue interrupts active while driver is loaded */
2893 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2894 		ixl_configure_intr0_msix(pf);
2895 		ixl_enable_intr0(hw);
2896 	}
2897 
2898 	device_printf(dev, "Rebuilding driver state done.\n");
2899 	return (0);
2900 
2901 ixl_rebuild_hw_structs_after_reset_err:
2902 	device_printf(dev, "Reload the driver to recover\n");
2903 	return (error);
2904 }
2905 
2906 void
2907 ixl_handle_empr_reset(struct ixl_pf *pf)
2908 {
2909 	struct ixl_vsi	*vsi = &pf->vsi;
2910 	struct i40e_hw	*hw = &pf->hw;
2911 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2912 	int count = 0;
2913 	u32 reg;
2914 
2915 	ixl_prepare_for_reset(pf, is_up);
2916 
2917 	/* Typically finishes within 3-4 seconds */
2918 	while (count++ < 100) {
2919 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2920 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2921 		if (reg)
2922 			i40e_msec_delay(100);
2923 		else
2924 			break;
2925 	}
2926 	ixl_dbg(pf, IXL_DBG_INFO,
2927 			"Reset wait count: %d\n", count);
2928 
2929 	ixl_rebuild_hw_structs_after_reset(pf);
2930 
2931 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2932 }
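/*
 * Note (added commentary): the wait loop above polls I40E_GLGEN_RSTAT in
 * 100 ms steps, capped at 100 iterations, giving the EMP reset an upper
 * bound of roughly 10 seconds before the rebuild proceeds regardless of
 * the device state.
 */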
2933 
2934 /**
2935  * Update VSI-specific ethernet statistics counters.
2936  **/
2937 void
2938 ixl_update_eth_stats(struct ixl_vsi *vsi)
2939 {
2940 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2941 	struct i40e_hw *hw = &pf->hw;
2942 	struct i40e_eth_stats *es;
2943 	struct i40e_eth_stats *oes;
2944 	struct i40e_hw_port_stats *nsd;
2945 	u16 stat_idx = vsi->info.stat_counter_idx;
2946 
2947 	es = &vsi->eth_stats;
2948 	oes = &vsi->eth_stats_offsets;
2949 	nsd = &pf->stats;
2950 
2951 	/* Gather up the stats that the hw collects */
2952 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2953 			   vsi->stat_offsets_loaded,
2954 			   &oes->tx_errors, &es->tx_errors);
2955 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2956 			   vsi->stat_offsets_loaded,
2957 			   &oes->rx_discards, &es->rx_discards);
2958 
2959 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2960 			   I40E_GLV_GORCL(stat_idx),
2961 			   vsi->stat_offsets_loaded,
2962 			   &oes->rx_bytes, &es->rx_bytes);
2963 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2964 			   I40E_GLV_UPRCL(stat_idx),
2965 			   vsi->stat_offsets_loaded,
2966 			   &oes->rx_unicast, &es->rx_unicast);
2967 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2968 			   I40E_GLV_MPRCL(stat_idx),
2969 			   vsi->stat_offsets_loaded,
2970 			   &oes->rx_multicast, &es->rx_multicast);
2971 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2972 			   I40E_GLV_BPRCL(stat_idx),
2973 			   vsi->stat_offsets_loaded,
2974 			   &oes->rx_broadcast, &es->rx_broadcast);
2975 
2976 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2977 			   I40E_GLV_GOTCL(stat_idx),
2978 			   vsi->stat_offsets_loaded,
2979 			   &oes->tx_bytes, &es->tx_bytes);
2980 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2981 			   I40E_GLV_UPTCL(stat_idx),
2982 			   vsi->stat_offsets_loaded,
2983 			   &oes->tx_unicast, &es->tx_unicast);
2984 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2985 			   I40E_GLV_MPTCL(stat_idx),
2986 			   vsi->stat_offsets_loaded,
2987 			   &oes->tx_multicast, &es->tx_multicast);
2988 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2989 			   I40E_GLV_BPTCL(stat_idx),
2990 			   vsi->stat_offsets_loaded,
2991 			   &oes->tx_broadcast, &es->tx_broadcast);
2992 	vsi->stat_offsets_loaded = true;
2993 }
2994 
2995 void
2996 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2997 {
2998 	struct ixl_pf		*pf;
2999 	struct ifnet		*ifp;
3000 	struct i40e_eth_stats	*es;
3001 	u64			tx_discards;
3002 
3003 	struct i40e_hw_port_stats *nsd;
3004 
3005 	pf = vsi->back;
3006 	ifp = vsi->ifp;
3007 	es = &vsi->eth_stats;
3008 	nsd = &pf->stats;
3009 
3010 	ixl_update_eth_stats(vsi);
3011 
3012 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3013 
3014 	/* Update ifnet stats */
3015 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3016 	                   es->rx_multicast +
3017 			   es->rx_broadcast);
3018 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3019 	                   es->tx_multicast +
3020 			   es->tx_broadcast);
3021 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3022 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3023 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3024 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3025 
3026 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3027 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3028 	    nsd->rx_jabber);
3029 	IXL_SET_OERRORS(vsi, es->tx_errors);
3030 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3031 	IXL_SET_OQDROPS(vsi, tx_discards);
3032 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3033 	IXL_SET_COLLISIONS(vsi, 0);
3034 }
3035 
3036 /**
3037  * Reset all of the stats for the given pf
3038  **/
3039 void
3040 ixl_pf_reset_stats(struct ixl_pf *pf)
3041 {
3042 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3043 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3044 	pf->stat_offsets_loaded = false;
3045 }
3046 
3047 /**
3048  * Resets all stats of the given vsi
3049  **/
3050 void
3051 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3052 {
3053 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3054 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3055 	vsi->stat_offsets_loaded = false;
3056 }
3057 
3058 /**
3059  * Read and update a 48 bit stat from the hw
3060  *
3061  * Since the device stats are not reset at PFReset, they likely will not
3062  * be zeroed when the driver starts.  We'll save the first values read
3063  * and use them as offsets to be subtracted from the raw values in order
3064  * to report stats that count from zero.
3065  **/
3066 void
3067 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3068 	bool offset_loaded, u64 *offset, u64 *stat)
3069 {
3070 	u64 new_data;
3071 
3072 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3073 	new_data = rd64(hw, loreg);
3074 #else
3075 	/*
3076 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3077 	 * 10 don't support 64-bit bus reads/writes.
3078 	 */
3079 	new_data = rd32(hw, loreg);
3080 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3081 #endif
3082 
3083 	if (!offset_loaded)
3084 		*offset = new_data;
3085 	if (new_data >= *offset)
3086 		*stat = new_data - *offset;
3087 	else
3088 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3089 	*stat &= 0xFFFFFFFFFFFFULL;
3090 }
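/*
 * Worked example (added commentary): the counter is 48 bits wide, so it
 * wraps at 2^48. If the saved offset is 0xFFFFFFFFFF00 and the next read
 * returns 0x100, then new_data < *offset, and the reported stat becomes
 * (0x100 + 2^48) - 0xFFFFFFFFFF00 = 0x200, i.e. the 0x200 increments
 * counted across the wrap.
 */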
3091 
3092 /**
3093  * Read and update a 32 bit stat from the hw
3094  **/
3095 void
3096 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3097 	bool offset_loaded, u64 *offset, u64 *stat)
3098 {
3099 	u32 new_data;
3100 
3101 	new_data = rd32(hw, reg);
3102 	if (!offset_loaded)
3103 		*offset = new_data;
3104 	if (new_data >= *offset)
3105 		*stat = (u32)(new_data - *offset);
3106 	else
3107 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3108 }
3109 
3110 void
3111 ixl_add_device_sysctls(struct ixl_pf *pf)
3112 {
3113 	device_t dev = pf->dev;
3114 	struct i40e_hw *hw = &pf->hw;
3115 
3116 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3117 	struct sysctl_oid_list *ctx_list =
3118 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3119 
3120 	struct sysctl_oid *debug_node;
3121 	struct sysctl_oid_list *debug_list;
3122 
3123 	struct sysctl_oid *fec_node;
3124 	struct sysctl_oid_list *fec_list;
3125 
3126 	/* Set up sysctls */
3127 	SYSCTL_ADD_PROC(ctx, ctx_list,
3128 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3129 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3130 
3131 	SYSCTL_ADD_PROC(ctx, ctx_list,
3132 	    OID_AUTO, "advertise_speed",
3133 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3134 	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3135 
3136 	SYSCTL_ADD_PROC(ctx, ctx_list,
3137 	    OID_AUTO, "supported_speeds",
3138 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3139 	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3140 
3141 	SYSCTL_ADD_PROC(ctx, ctx_list,
3142 	    OID_AUTO, "current_speed",
3143 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3144 	    ixl_sysctl_current_speed, "A", "Current Port Speed");
3145 
3146 	SYSCTL_ADD_PROC(ctx, ctx_list,
3147 	    OID_AUTO, "fw_version",
3148 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3149 	    ixl_sysctl_show_fw, "A", "Firmware version");
3150 
3151 	SYSCTL_ADD_PROC(ctx, ctx_list,
3152 	    OID_AUTO, "unallocated_queues",
3153 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3154 	    ixl_sysctl_unallocated_queues, "I",
3155 	    "Queues not allocated to a PF or VF");
3156 
3157 	SYSCTL_ADD_PROC(ctx, ctx_list,
3158 	    OID_AUTO, "tx_itr",
3159 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3160 	    ixl_sysctl_pf_tx_itr, "I",
3161 	    "Immediately set TX ITR value for all queues");
3162 
3163 	SYSCTL_ADD_PROC(ctx, ctx_list,
3164 	    OID_AUTO, "rx_itr",
3165 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3166 	    ixl_sysctl_pf_rx_itr, "I",
3167 	    "Immediately set RX ITR value for all queues");
3168 
3169 	SYSCTL_ADD_INT(ctx, ctx_list,
3170 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3171 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3172 
3173 	SYSCTL_ADD_INT(ctx, ctx_list,
3174 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3175 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3176 
3177 	/* Add FEC sysctls for 25G adapters */
3178 	if (i40e_is_25G_device(hw->device_id)) {
3179 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3180 		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3181 		    "FEC Sysctls");
3182 		fec_list = SYSCTL_CHILDREN(fec_node);
3183 
3184 		SYSCTL_ADD_PROC(ctx, fec_list,
3185 		    OID_AUTO, "fc_ability",
3186 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3187 		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3188 
3189 		SYSCTL_ADD_PROC(ctx, fec_list,
3190 		    OID_AUTO, "rs_ability",
3191 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3192 		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3193 
3194 		SYSCTL_ADD_PROC(ctx, fec_list,
3195 		    OID_AUTO, "fc_requested",
3196 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3197 		    ixl_sysctl_fec_fc_request, "I",
3198 		    "FC FEC mode requested on link");
3199 
3200 		SYSCTL_ADD_PROC(ctx, fec_list,
3201 		    OID_AUTO, "rs_requested",
3202 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3203 		    ixl_sysctl_fec_rs_request, "I",
3204 		    "RS FEC mode requested on link");
3205 
3206 		SYSCTL_ADD_PROC(ctx, fec_list,
3207 		    OID_AUTO, "auto_fec_enabled",
3208 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3209 		    ixl_sysctl_fec_auto_enable, "I",
3210 		    "Let FW decide FEC ability/request modes");
3211 	}
3212 
3213 	SYSCTL_ADD_PROC(ctx, ctx_list,
3214 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3215 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3216 
3217 	/* Add sysctls meant to print debug information, but don't list them
3218 	 * in "sysctl -a" output. */
3219 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3220 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
3221 	    "Debug Sysctls");
3222 	debug_list = SYSCTL_CHILDREN(debug_node);
3223 
3224 	SYSCTL_ADD_UINT(ctx, debug_list,
3225 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3226 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3227 
3228 	SYSCTL_ADD_UINT(ctx, debug_list,
3229 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3230 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3231 
3232 	SYSCTL_ADD_PROC(ctx, debug_list,
3233 	    OID_AUTO, "link_status",
3234 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3235 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3236 
3237 	SYSCTL_ADD_PROC(ctx, debug_list,
3238 	    OID_AUTO, "phy_abilities",
3239 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3240 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3241 
3242 	SYSCTL_ADD_PROC(ctx, debug_list,
3243 	    OID_AUTO, "filter_list",
3244 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3245 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3246 
3247 	SYSCTL_ADD_PROC(ctx, debug_list,
3248 	    OID_AUTO, "hw_res_alloc",
3249 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3250 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3251 
3252 	SYSCTL_ADD_PROC(ctx, debug_list,
3253 	    OID_AUTO, "switch_config",
3254 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3255 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3256 
3257 	SYSCTL_ADD_PROC(ctx, debug_list,
3258 	    OID_AUTO, "rss_key",
3259 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3260 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3261 
3262 	SYSCTL_ADD_PROC(ctx, debug_list,
3263 	    OID_AUTO, "rss_lut",
3264 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3265 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3266 
3267 	SYSCTL_ADD_PROC(ctx, debug_list,
3268 	    OID_AUTO, "rss_hena",
3269 	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3270 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3271 
3272 	SYSCTL_ADD_PROC(ctx, debug_list,
3273 	    OID_AUTO, "disable_fw_link_management",
3274 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3275 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3276 
3277 	SYSCTL_ADD_PROC(ctx, debug_list,
3278 	    OID_AUTO, "dump_debug_data",
3279 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3280 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3281 
3282 	SYSCTL_ADD_PROC(ctx, debug_list,
3283 	    OID_AUTO, "do_pf_reset",
3284 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3285 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3286 
3287 	SYSCTL_ADD_PROC(ctx, debug_list,
3288 	    OID_AUTO, "do_core_reset",
3289 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3290 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3291 
3292 	SYSCTL_ADD_PROC(ctx, debug_list,
3293 	    OID_AUTO, "do_global_reset",
3294 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3295 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3296 
3297 	SYSCTL_ADD_PROC(ctx, debug_list,
3298 	    OID_AUTO, "do_emp_reset",
3299 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3300 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3301 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3302 
3303 	SYSCTL_ADD_PROC(ctx, debug_list,
3304 	    OID_AUTO, "queue_interrupt_table",
3305 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3306 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3307 
3308 	if (pf->has_i2c) {
3309 		SYSCTL_ADD_PROC(ctx, debug_list,
3310 		    OID_AUTO, "read_i2c_byte",
3311 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3312 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3313 
3314 		SYSCTL_ADD_PROC(ctx, debug_list,
3315 		    OID_AUTO, "write_i2c_byte",
3316 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3317 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3318 
3319 		SYSCTL_ADD_PROC(ctx, debug_list,
3320 		    OID_AUTO, "read_i2c_diag_data",
3321 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3322 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3323 	}
3324 }
3325 
3326 /*
3327  * Primarily for finding out how many queues can be assigned to VFs
3328  * at runtime.
3329  */
3330 static int
3331 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3332 {
3333 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3334 	int queues;
3335 
3336 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3337 
3338 	return sysctl_handle_int(oidp, NULL, queues, req);
3339 }
3340 
3341 /*
3342 ** Set flow control using sysctl:
3343 ** 	0 - off
3344 **	1 - rx pause
3345 **	2 - tx pause
3346 **	3 - full
3347 */
3348 int
3349 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3350 {
3351 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3352 	struct i40e_hw *hw = &pf->hw;
3353 	device_t dev = pf->dev;
3354 	int requested_fc, error = 0;
3355 	enum i40e_status_code aq_error = 0;
3356 	u8 fc_aq_err = 0;
3357 
3358 	/* Get request */
3359 	requested_fc = pf->fc;
3360 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3361 	if ((error) || (req->newptr == NULL))
3362 		return (error);
3363 	if (requested_fc < 0 || requested_fc > 3) {
3364 		device_printf(dev,
3365 		    "Invalid fc mode; valid modes are 0 through 3\n");
3366 		return (EINVAL);
3367 	}
3368 
3369 	/* Set fc ability for port */
3370 	hw->fc.requested_mode = requested_fc;
3371 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3372 	if (aq_error) {
3373 		device_printf(dev,
3374 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3375 		    __func__, aq_error, fc_aq_err);
3376 		return (EIO);
3377 	}
3378 	pf->fc = requested_fc;
3379 
3380 	return (0);
3381 }
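/*
 * Usage example (added commentary): requesting full (rx + tx) flow
 * control on unit 0 from userland:
 *
 *	sysctl dev.ixl.0.fc=3
 *
 * The request is pushed to the port through i40e_set_fc(); pf->fc is
 * only updated once the firmware accepts the new mode.
 */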
3382 
3383 char *
3384 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3385 {
3386 	int index;
3387 
3388 	char *speeds[] = {
3389 		"Unknown",
3390 		"100 Mbps",
3391 		"1 Gbps",
3392 		"10 Gbps",
3393 		"40 Gbps",
3394 		"20 Gbps",
3395 		"25 Gbps",
3396 	};
3397 
3398 	switch (link_speed) {
3399 	case I40E_LINK_SPEED_100MB:
3400 		index = 1;
3401 		break;
3402 	case I40E_LINK_SPEED_1GB:
3403 		index = 2;
3404 		break;
3405 	case I40E_LINK_SPEED_10GB:
3406 		index = 3;
3407 		break;
3408 	case I40E_LINK_SPEED_40GB:
3409 		index = 4;
3410 		break;
3411 	case I40E_LINK_SPEED_20GB:
3412 		index = 5;
3413 		break;
3414 	case I40E_LINK_SPEED_25GB:
3415 		index = 6;
3416 		break;
3417 	case I40E_LINK_SPEED_UNKNOWN:
3418 	default:
3419 		index = 0;
3420 		break;
3421 	}
3422 
3423 	return speeds[index];
3424 }
3425 
3426 int
3427 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3428 {
3429 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3430 	struct i40e_hw *hw = &pf->hw;
3431 	int error = 0;
3432 
3433 	ixl_update_link_status(pf);
3434 
3435 	error = sysctl_handle_string(oidp,
3436 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3437 	    8, req);
3438 	return (error);
3439 }
3440 
3441 /*
 * Converts an 8-bit speeds bitmap between the sysctl flag format and
 * the Admin Queue flag format.
3444  */
3445 static u8
3446 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3447 {
3448 	static u16 speedmap[6] = {
3449 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3450 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3451 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3452 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3453 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3454 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3455 	};
3456 	u8 retval = 0;
3457 
3458 	for (int i = 0; i < 6; i++) {
3459 		if (to_aq)
3460 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3461 		else
3462 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3463 	}
3464 
3465 	return (retval);
3466 }
3467 
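/*
 * Worked example of the table above: each speedmap entry packs the AQ
 * flag in its low byte and the matching sysctl flag in its high byte,
 * so (I40E_LINK_SPEED_10GB | (0x4 << 8)) ties sysctl bit 0x4 to the AQ
 * 10G bit.  Converting sysctl value 0x24 (10G | 40G) with to_aq = true
 * therefore returns (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_40GB).
 */
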
3468 int
3469 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3470 {
3471 	struct i40e_hw *hw = &pf->hw;
3472 	device_t dev = pf->dev;
3473 	struct i40e_aq_get_phy_abilities_resp abilities;
3474 	struct i40e_aq_set_phy_config config;
3475 	enum i40e_status_code aq_error = 0;
3476 
3477 	/* Get current capability information */
3478 	aq_error = i40e_aq_get_phy_capabilities(hw,
3479 	    FALSE, FALSE, &abilities, NULL);
3480 	if (aq_error) {
3481 		device_printf(dev,
3482 		    "%s: Error getting phy capabilities %d,"
3483 		    " aq error: %d\n", __func__, aq_error,
3484 		    hw->aq.asq_last_status);
3485 		return (EIO);
3486 	}
3487 
3488 	/* Prepare new config */
3489 	bzero(&config, sizeof(config));
3490 	if (from_aq)
3491 		config.link_speed = speeds;
3492 	else
3493 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3494 	config.phy_type = abilities.phy_type;
3495 	config.phy_type_ext = abilities.phy_type_ext;
3496 	config.abilities = abilities.abilities
3497 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3498 	config.eee_capability = abilities.eee_capability;
3499 	config.eeer = abilities.eeer_val;
3500 	config.low_power_ctrl = abilities.d3_lpan;
3501 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3502 
3503 	/* Do aq command & restart link */
3504 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3505 	if (aq_error) {
3506 		device_printf(dev,
3507 		    "%s: Error setting new phy config %d,"
3508 		    " aq error: %d\n", __func__, aq_error,
3509 		    hw->aq.asq_last_status);
3510 		return (EIO);
3511 	}
3512 
3513 	return (0);
3514 }
3515 
3516 /*
** Supported link speeds:
3518 **	Flags:
3519 **	 0x1 - 100 Mb
3520 **	 0x2 - 1G
3521 **	 0x4 - 10G
3522 **	 0x8 - 20G
3523 **	0x10 - 25G
3524 **	0x20 - 40G
3525 */
3526 static int
3527 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3528 {
3529 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3530 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3531 
3532 	return sysctl_handle_int(oidp, NULL, supported, req);
3533 }
3534 
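/*
 * Example: a port that supports 1G, 10G and 40G reports
 * 0x2 | 0x4 | 0x20 = 0x26 (38 decimal) through this sysctl.
 */
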
3535 /*
3536 ** Control link advertise speed:
3537 **	Flags:
3538 **	 0x1 - advertise 100 Mb
3539 **	 0x2 - advertise 1G
3540 **	 0x4 - advertise 10G
3541 **	 0x8 - advertise 20G
3542 **	0x10 - advertise 25G
3543 **	0x20 - advertise 40G
3544 **
3545 **	Set to 0 to disable link
3546 */
3547 int
3548 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3549 {
3550 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3551 	device_t dev = pf->dev;
3552 	u8 converted_speeds;
3553 	int requested_ls = 0;
3554 	int error = 0;
3555 
3556 	/* Read in new mode */
3557 	requested_ls = pf->advertised_speed;
3558 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3559 	if ((error) || (req->newptr == NULL))
3560 		return (error);
3561 
3562 	/* Error out if bits outside of possible flag range are set */
3563 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3564 		device_printf(dev, "Input advertised speed out of range; "
3565 		    "valid flags are: 0x%02x\n",
3566 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3567 		return (EINVAL);
3568 	}
3569 
3570 	/* Check if adapter supports input value */
3571 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3572 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3573 		device_printf(dev, "Invalid advertised speed; "
3574 		    "valid flags are: 0x%02x\n",
3575 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3576 		return (EINVAL);
3577 	}
3578 
3579 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3580 	if (error)
3581 		return (error);
3582 
3583 	pf->advertised_speed = requested_ls;
3584 	ixl_update_link_status(pf);
3585 	return (0);
3586 }
3587 
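/*
 * Example usage (OID name and unit number assumed, as with the flow
 * control example above):
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x24	advertise only 10G and 40G
 *	# sysctl dev.ixl.0.advertise_speed=0	disable link
 */
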
3588 /*
3589 ** Get the width and transaction speed of
3590 ** the bus this adapter is plugged into.
3591 */
3592 void
3593 ixl_get_bus_info(struct ixl_pf *pf)
3594 {
3595 	struct i40e_hw *hw = &pf->hw;
3596 	device_t dev = pf->dev;
	u16 link;
	u32 offset, num_ports;
3599 	u64 max_speed;
3600 
	/* Some devices don't use PCIe */
	if (hw->mac.type == I40E_MAC_X722)
		return;

	/* Read PCI Express Capabilities Link Status Register */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3608 
	/* Fill out hw struct with PCIe info */
3610 	i40e_set_pci_config_data(hw, link);
3611 
3612 	/* Use info to print out bandwidth messages */
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));
3622 
3623 	/*
3624 	 * If adapter is in slot with maximum supported speed,
3625 	 * no warning message needs to be printed out.
3626 	 */
3627 	if (hw->bus.speed >= i40e_bus_speed_8000
3628 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3629 		return;
3630 
3631 	num_ports = bitcount32(hw->func_caps.valid_functions);
3632 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3633 
3634 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device may be insufficient for"
		    " optimal performance.\n");
		device_printf(dev, "Please move the device to a different"
		    " PCI-e link with more lanes and/or higher"
		    " transfer rate.\n");
	}
3642 }
3643 
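/*
 * Reference arithmetic for the check above, as coded (encoding overhead
 * is ignored): hw->bus.speed is in MT/s (8000 for PCIe gen3) and
 * hw->bus.width is the lane count, so a gen3 x4 slot scores
 * 8000 * 4 = 32000.  A single 40G port needs 40000 > 32000 and draws
 * the warning; a gen3 x8 slot returns early above and is never checked.
 */
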
3644 static int
3645 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3646 {
3647 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3648 	struct i40e_hw	*hw = &pf->hw;
3649 	struct sbuf	*sbuf;
3650 
3651 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3652 	ixl_nvm_version_str(hw, sbuf);
3653 	sbuf_finish(sbuf);
3654 	sbuf_delete(sbuf);
3655 
3656 	return (0);
3657 }
3658 
3659 void
3660 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3661 {
3662 	if ((nvma->command == I40E_NVM_READ) &&
3663 	    ((nvma->config & 0xFF) == 0xF) &&
3664 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3665 	    (nvma->offset == 0) &&
3666 	    (nvma->data_size == 1)) {
		/* A "Get Driver Status" poll; intentionally not logged */
		// device_printf(dev, "- Get Driver Status Command\n");
	}
	else if (nvma->command == I40E_NVM_READ) {
		/* Other reads are intentionally not logged */
	}
3672 	else {
3673 		switch (nvma->command) {
3674 		case 0xB:
3675 			device_printf(dev, "- command: I40E_NVM_READ\n");
3676 			break;
3677 		case 0xC:
3678 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3679 			break;
3680 		default:
3681 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3682 			break;
3683 		}
3684 
3685 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3686 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3687 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3688 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3689 	}
3690 }
3691 
3692 int
3693 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3694 {
3695 	struct i40e_hw *hw = &pf->hw;
3696 	struct i40e_nvm_access *nvma;
3697 	device_t dev = pf->dev;
3698 	enum i40e_status_code status = 0;
3699 	size_t nvma_size, ifd_len, exp_len;
3700 	int err, perrno;
3701 
3702 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3703 
3704 	/* Sanity checks */
3705 	nvma_size = sizeof(struct i40e_nvm_access);
3706 	ifd_len = ifd->ifd_len;
3707 
3708 	if (ifd_len < nvma_size ||
3709 	    ifd->ifd_data == NULL) {
3710 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3711 		    __func__);
3712 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3713 		    __func__, ifd_len, nvma_size);
3714 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3715 		    ifd->ifd_data);
3716 		return (EINVAL);
3717 	}
3718 
3719 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3720 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3721 	if (err) {
3722 		device_printf(dev, "%s: Cannot get request from user space\n",
3723 		    __func__);
3724 		free(nvma, M_DEVBUF);
3725 		return (err);
3726 	}
3727 
3728 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3729 		ixl_print_nvm_cmd(dev, nvma);
3730 
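	/* Wait up to 100 * 100 ms = 10 s for a pending adapter reset */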
3731 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3732 		int count = 0;
3733 		while (count++ < 100) {
3734 			i40e_msec_delay(100);
3735 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3736 				break;
3737 		}
3738 	}
3739 
3740 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3741 		free(nvma, M_DEVBUF);
3742 		return (-EBUSY);
3743 	}
3744 
3745 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3746 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3747 		    __func__);
3748 		free(nvma, M_DEVBUF);
3749 		return (EINVAL);
3750 	}
3751 
3752 	/*
3753 	 * Older versions of the NVM update tool don't set ifd_len to the size
3754 	 * of the entire buffer passed to the ioctl. Check the data_size field
3755 	 * in the contained i40e_nvm_access struct and ensure everything is
3756 	 * copied in from userspace.
3757 	 */
3758 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
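	/*
	 * e.g. data_size = 8 gives exp_len = nvma_size + 7, since the
	 * struct itself accounts for the first payload byte.
	 */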
3759 
3760 	if (ifd_len < exp_len) {
3761 		ifd_len = exp_len;
3762 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3763 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3764 		if (err) {
3765 			device_printf(dev, "%s: Cannot get request from user space\n",
3766 					__func__);
3767 			free(nvma, M_DEVBUF);
3768 			return (err);
3769 		}
3770 	}
3771 
3772 	// TODO: Might need a different lock here
3773 	// IXL_PF_LOCK(pf);
3774 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3775 	// IXL_PF_UNLOCK(pf);
3776 
3777 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3778 	free(nvma, M_DEVBUF);
3779 	if (err) {
3780 		device_printf(dev, "%s: Cannot return data to user space\n",
3781 				__func__);
3782 		return (err);
3783 	}
3784 
3785 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3786 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3787 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3788 		    i40e_stat_str(hw, status), perrno);
3789 
3790 	/*
3791 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3792 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3793 	 */
3794 	if (perrno == -EPERM)
3795 		return (-EACCES);
3796 	else
3797 		return (perrno);
3798 }
3799 
3800 int
3801 ixl_find_i2c_interface(struct ixl_pf *pf)
3802 {
3803 	struct i40e_hw *hw = &pf->hw;
3804 	bool i2c_en, port_matched;
3805 	u32 reg;
3806 
3807 	for (int i = 0; i < 4; i++) {
3808 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3809 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3810 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3811 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3812 		    & BIT(hw->port);
3813 		if (i2c_en && port_matched)
3814 			return (i);
3815 	}
3816 
3817 	return (-1);
3818 }
3819 
3820 static char *
3821 ixl_phy_type_string(u32 bit_pos, bool ext)
3822 {
3823 	static char * phy_types_str[32] = {
3824 		"SGMII",
3825 		"1000BASE-KX",
3826 		"10GBASE-KX4",
3827 		"10GBASE-KR",
3828 		"40GBASE-KR4",
3829 		"XAUI",
3830 		"XFI",
3831 		"SFI",
3832 		"XLAUI",
3833 		"XLPPI",
3834 		"40GBASE-CR4",
3835 		"10GBASE-CR1",
3836 		"SFP+ Active DA",
3837 		"QSFP+ Active DA",
3838 		"Reserved (14)",
3839 		"Reserved (15)",
3840 		"Reserved (16)",
3841 		"100BASE-TX",
3842 		"1000BASE-T",
3843 		"10GBASE-T",
3844 		"10GBASE-SR",
3845 		"10GBASE-LR",
3846 		"10GBASE-SFP+Cu",
3847 		"10GBASE-CR1",
3848 		"40GBASE-CR4",
3849 		"40GBASE-SR4",
3850 		"40GBASE-LR4",
3851 		"1000BASE-SX",
3852 		"1000BASE-LX",
3853 		"1000BASE-T Optical",
3854 		"20GBASE-KR2",
3855 		"Reserved (31)"
3856 	};
3857 	static char * ext_phy_types_str[8] = {
3858 		"25GBASE-KR",
3859 		"25GBASE-CR",
3860 		"25GBASE-SR",
3861 		"25GBASE-LR",
3862 		"25GBASE-AOC",
3863 		"25GBASE-ACC",
3864 		"Reserved (6)",
3865 		"Reserved (7)"
3866 	};
3867 
3868 	if (ext && bit_pos > 7) return "Invalid_Ext";
3869 	if (bit_pos > 31) return "Invalid";
3870 
3871 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3872 }
3873 
/* TODO: ERJ: I don't think this is necessary anymore. */
3875 int
3876 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3877 {
3878 	device_t dev = pf->dev;
3879 	struct i40e_hw *hw = &pf->hw;
3880 	struct i40e_aq_desc desc;
3881 	enum i40e_status_code status;
3882 
3883 	struct i40e_aqc_get_link_status *aq_link_status =
3884 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3885 
3886 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3887 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3888 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3889 	if (status) {
3890 		device_printf(dev,
3891 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3892 		    __func__, i40e_stat_str(hw, status),
3893 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3894 		return (EIO);
3895 	}
3896 
3897 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3898 	return (0);
3899 }
3900 
3901 static char *
3902 ixl_phy_type_string_ls(u8 val)
3903 {
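	/* Link-status PHY type values of 0x1F and above index the extended table */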
3904 	if (val >= 0x1F)
3905 		return ixl_phy_type_string(val - 0x1F, true);
3906 	else
3907 		return ixl_phy_type_string(val, false);
3908 }
3909 
3910 static int
3911 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3912 {
3913 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3914 	device_t dev = pf->dev;
3915 	struct sbuf *buf;
3916 	int error = 0;
3917 
3918 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3919 	if (!buf) {
3920 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3921 		return (ENOMEM);
3922 	}
3923 
3924 	struct i40e_aqc_get_link_status link_status;
3925 	error = ixl_aq_get_link_status(pf, &link_status);
3926 	if (error) {
3927 		sbuf_delete(buf);
3928 		return (error);
3929 	}
3930 
3931 	sbuf_printf(buf, "\n"
3932 	    "PHY Type : 0x%02x<%s>\n"
3933 	    "Speed    : 0x%02x\n"
3934 	    "Link info: 0x%02x\n"
3935 	    "AN info  : 0x%02x\n"
3936 	    "Ext info : 0x%02x\n"
3937 	    "Loopback : 0x%02x\n"
3938 	    "Max Frame: %d\n"
3939 	    "Config   : 0x%02x\n"
3940 	    "Power    : 0x%02x",
3941 	    link_status.phy_type,
3942 	    ixl_phy_type_string_ls(link_status.phy_type),
3943 	    link_status.link_speed,
3944 	    link_status.link_info,
3945 	    link_status.an_info,
3946 	    link_status.ext_info,
3947 	    link_status.loopback,
3948 	    link_status.max_frame_size,
3949 	    link_status.config,
3950 	    link_status.power_desc);
3951 
3952 	error = sbuf_finish(buf);
3953 	if (error)
3954 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3955 
3956 	sbuf_delete(buf);
3957 	return (error);
3958 }
3959 
3960 static int
3961 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3962 {
3963 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3964 	struct i40e_hw *hw = &pf->hw;
3965 	device_t dev = pf->dev;
3966 	enum i40e_status_code status;
3967 	struct i40e_aq_get_phy_abilities_resp abilities;
3968 	struct sbuf *buf;
3969 	int error = 0;
3970 
3971 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3972 	if (!buf) {
3973 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3974 		return (ENOMEM);
3975 	}
3976 
3977 	status = i40e_aq_get_phy_capabilities(hw,
3978 	    FALSE, FALSE, &abilities, NULL);
3979 	if (status) {
3980 		device_printf(dev,
3981 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3982 		    __func__, i40e_stat_str(hw, status),
3983 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3984 		sbuf_delete(buf);
3985 		return (EIO);
3986 	}
3987 
3988 	sbuf_printf(buf, "\n"
3989 	    "PHY Type : %08x",
3990 	    abilities.phy_type);
3991 
3992 	if (abilities.phy_type != 0) {
3993 		sbuf_printf(buf, "<");
3994 		for (int i = 0; i < 32; i++)
3995 			if ((1 << i) & abilities.phy_type)
3996 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3997 		sbuf_printf(buf, ">\n");
3998 	}
3999 
4000 	sbuf_printf(buf, "PHY Ext  : %02x",
4001 	    abilities.phy_type_ext);
4002 
4003 	if (abilities.phy_type_ext != 0) {
4004 		sbuf_printf(buf, "<");
		/* Check all 8 ext bits; 25G AOC/ACC sit above bit 3 */
		for (int i = 0; i < 8; i++)
4006 			if ((1 << i) & abilities.phy_type_ext)
4007 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
4008 		sbuf_printf(buf, ">");
4009 	}
4010 	sbuf_printf(buf, "\n");
4011 
4012 	sbuf_printf(buf,
4013 	    "Speed    : %02x\n"
4014 	    "Abilities: %02x\n"
4015 	    "EEE cap  : %04x\n"
4016 	    "EEER reg : %08x\n"
4017 	    "D3 Lpan  : %02x\n"
4018 	    "ID       : %02x %02x %02x %02x\n"
4019 	    "ModType  : %02x %02x %02x\n"
4020 	    "ModType E: %01x\n"
4021 	    "FEC Cfg  : %02x\n"
4022 	    "Ext CC   : %02x",
4023 	    abilities.link_speed,
4024 	    abilities.abilities, abilities.eee_capability,
4025 	    abilities.eeer_val, abilities.d3_lpan,
4026 	    abilities.phy_id[0], abilities.phy_id[1],
4027 	    abilities.phy_id[2], abilities.phy_id[3],
4028 	    abilities.module_type[0], abilities.module_type[1],
4029 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
4030 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
4031 	    abilities.ext_comp_code);
4032 
4033 	error = sbuf_finish(buf);
4034 	if (error)
4035 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4036 
4037 	sbuf_delete(buf);
4038 	return (error);
4039 }
4040 
4041 static int
4042 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4043 {
4044 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4045 	struct ixl_vsi *vsi = &pf->vsi;
4046 	struct ixl_mac_filter *f;
4047 	device_t dev = pf->dev;
4048 	int error = 0, ftl_len = 0, ftl_counter = 0;
4049 
4050 	struct sbuf *buf;
4051 
4052 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4053 	if (!buf) {
4054 		device_printf(dev, "Could not allocate sbuf for output.\n");
4055 		return (ENOMEM);
4056 	}
4057 
4058 	sbuf_printf(buf, "\n");
4059 
4060 	/* Print MAC filters */
4061 	sbuf_printf(buf, "PF Filters:\n");
4062 	SLIST_FOREACH(f, &vsi->ftl, next)
4063 		ftl_len++;
4064 
4065 	if (ftl_len < 1)
4066 		sbuf_printf(buf, "(none)\n");
4067 	else {
4068 		SLIST_FOREACH(f, &vsi->ftl, next) {
4069 			sbuf_printf(buf,
4070 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4071 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4072 			/* don't print '\n' for last entry */
4073 			if (++ftl_counter != ftl_len)
4074 				sbuf_printf(buf, "\n");
4075 		}
4076 	}
4077 
4078 #ifdef PCI_IOV
4079 	/* TODO: Give each VF its own filter list sysctl */
4080 	struct ixl_vf *vf;
4081 	if (pf->num_vfs > 0) {
4082 		sbuf_printf(buf, "\n\n");
4083 		for (int i = 0; i < pf->num_vfs; i++) {
4084 			vf = &pf->vfs[i];
4085 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4086 				continue;
4087 
4088 			vsi = &vf->vsi;
			ftl_len = 0;
			ftl_counter = 0;
4090 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4091 			SLIST_FOREACH(f, &vsi->ftl, next)
4092 				ftl_len++;
4093 
4094 			if (ftl_len < 1)
4095 				sbuf_printf(buf, "(none)\n");
4096 			else {
4097 				SLIST_FOREACH(f, &vsi->ftl, next) {
4098 					sbuf_printf(buf,
4099 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4100 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4101 				}
4102 			}
4103 		}
4104 	}
4105 #endif
4106 
4107 	error = sbuf_finish(buf);
4108 	if (error)
4109 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4110 	sbuf_delete(buf);
4111 
4112 	return (error);
4113 }
4114 
4115 #define IXL_SW_RES_SIZE 0x14
4116 int
4117 ixl_res_alloc_cmp(const void *a, const void *b)
4118 {
4119 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4120 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4121 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4122 
4123 	return ((int)one->resource_type - (int)two->resource_type);
4124 }
4125 
4126 /*
4127  * Longest string length: 25
4128  */
4129 char *
4130 ixl_switch_res_type_string(u8 type)
4131 {
	// TODO: This should also be const (needs a const-correct return type)
	static char * ixl_switch_res_type_strings[0x14] = {
4134 		"VEB",
4135 		"VSI",
4136 		"Perfect Match MAC address",
4137 		"S-tag",
4138 		"(Reserved)",
4139 		"Multicast hash entry",
4140 		"Unicast hash entry",
4141 		"VLAN",
4142 		"VSI List entry",
4143 		"(Reserved)",
4144 		"VLAN Statistic Pool",
4145 		"Mirror Rule",
4146 		"Queue Set",
4147 		"Inner VLAN Forward filter",
4148 		"(Reserved)",
4149 		"Inner MAC",
4150 		"IP",
4151 		"GRE/VN1 Key",
4152 		"VN2 Key",
4153 		"Tunneling Port"
4154 	};
4155 
4156 	if (type < 0x14)
4157 		return ixl_switch_res_type_strings[type];
4158 	else
4159 		return "(Reserved)";
4160 }
4161 
4162 static int
4163 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4164 {
4165 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4166 	struct i40e_hw *hw = &pf->hw;
4167 	device_t dev = pf->dev;
4168 	struct sbuf *buf;
4169 	enum i40e_status_code status;
4170 	int error = 0;
4171 
4172 	u8 num_entries;
4173 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4174 
4175 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4176 	if (!buf) {
4177 		device_printf(dev, "Could not allocate sbuf for output.\n");
4178 		return (ENOMEM);
4179 	}
4180 
4181 	bzero(resp, sizeof(resp));
4182 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4183 				resp,
4184 				IXL_SW_RES_SIZE,
4185 				NULL);
4186 	if (status) {
4187 		device_printf(dev,
4188 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4189 		    __func__, i40e_stat_str(hw, status),
4190 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4191 		sbuf_delete(buf);
		return (EIO);
4193 	}
4194 
4195 	/* Sort entries by type for display */
4196 	qsort(resp, num_entries,
4197 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4198 	    &ixl_res_alloc_cmp);
4199 
4200 	sbuf_cat(buf, "\n");
4201 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4202 	sbuf_printf(buf,
4203 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4204 	    "                          | (this)     | (all) | (this) | (all)       \n");
4205 	for (int i = 0; i < num_entries; i++) {
4206 		sbuf_printf(buf,
4207 		    "%25s | %10d   %5d   %6d   %12d",
4208 		    ixl_switch_res_type_string(resp[i].resource_type),
4209 		    resp[i].guaranteed,
4210 		    resp[i].total,
4211 		    resp[i].used,
4212 		    resp[i].total_unalloced);
4213 		if (i < num_entries - 1)
4214 			sbuf_cat(buf, "\n");
4215 	}
4216 
4217 	error = sbuf_finish(buf);
4218 	if (error)
4219 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4220 
4221 	sbuf_delete(buf);
4222 	return (error);
4223 }
4224 
4225 /*
4226 ** Caller must init and delete sbuf; this function will clear and
4227 ** finish it for caller.
4228 */
4229 char *
4230 ixl_switch_element_string(struct sbuf *s,
4231     struct i40e_aqc_switch_config_element_resp *element)
4232 {
4233 	sbuf_clear(s);
4234 
4235 	switch (element->element_type) {
4236 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4237 		sbuf_printf(s, "MAC %3d", element->element_info);
4238 		break;
4239 	case I40E_AQ_SW_ELEM_TYPE_PF:
4240 		sbuf_printf(s, "PF  %3d", element->element_info);
4241 		break;
4242 	case I40E_AQ_SW_ELEM_TYPE_VF:
4243 		sbuf_printf(s, "VF  %3d", element->element_info);
4244 		break;
4245 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4246 		sbuf_cat(s, "EMP");
4247 		break;
4248 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4249 		sbuf_cat(s, "BMC");
4250 		break;
4251 	case I40E_AQ_SW_ELEM_TYPE_PV:
4252 		sbuf_cat(s, "PV");
4253 		break;
4254 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4255 		sbuf_cat(s, "VEB");
4256 		break;
4257 	case I40E_AQ_SW_ELEM_TYPE_PA:
4258 		sbuf_cat(s, "PA");
4259 		break;
4260 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4261 		sbuf_printf(s, "VSI %3d", element->element_info);
4262 		break;
4263 	default:
4264 		sbuf_cat(s, "?");
4265 		break;
4266 	}
4267 
4268 	sbuf_finish(s);
4269 	return sbuf_data(s);
4270 }
4271 
4272 static int
4273 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4274 {
4275 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4276 	struct i40e_hw *hw = &pf->hw;
4277 	device_t dev = pf->dev;
4278 	struct sbuf *buf;
4279 	struct sbuf *nmbuf;
4280 	enum i40e_status_code status;
4281 	int error = 0;
4282 	u16 next = 0;
4283 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4284 
4285 	struct i40e_aqc_get_switch_config_resp *sw_config;
4286 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4287 
4288 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4289 	if (!buf) {
4290 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4291 		return (ENOMEM);
4292 	}
4293 
4294 	status = i40e_aq_get_switch_config(hw, sw_config,
4295 	    sizeof(aq_buf), &next, NULL);
4296 	if (status) {
4297 		device_printf(dev,
4298 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4299 		    __func__, i40e_stat_str(hw, status),
4300 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4301 		sbuf_delete(buf);
		return (EIO);
4303 	}
4304 	if (next)
4305 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4306 		    __func__, next);
4307 
4308 	nmbuf = sbuf_new_auto();
4309 	if (!nmbuf) {
4310 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4311 		sbuf_delete(buf);
4312 		return (ENOMEM);
4313 	}
4314 
4315 	sbuf_cat(buf, "\n");
4316 	/* Assuming <= 255 elements in switch */
4317 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4318 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4319 	/* Exclude:
4320 	** Revision -- all elements are revision 1 for now
4321 	*/
4322 	sbuf_printf(buf,
4323 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4324 	    "                |          |          | (uplink)\n");
4325 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4326 		// "%4d (%8s) | %8s   %8s   %#8x",
4327 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4328 		sbuf_cat(buf, " ");
4329 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4330 		    &sw_config->element[i]));
4331 		sbuf_cat(buf, " | ");
4332 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4333 		sbuf_cat(buf, "   ");
4334 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4335 		sbuf_cat(buf, "   ");
4336 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4337 		if (i < sw_config->header.num_reported - 1)
4338 			sbuf_cat(buf, "\n");
4339 	}
4340 	sbuf_delete(nmbuf);
4341 
4342 	error = sbuf_finish(buf);
4343 	if (error)
4344 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4345 
4346 	sbuf_delete(buf);
4347 
4348 	return (error);
4349 }
4350 
4351 static int
4352 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4353 {
4354 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4355 	struct i40e_hw *hw = &pf->hw;
4356 	device_t dev = pf->dev;
4357 	struct sbuf *buf;
4358 	int error = 0;
4359 	enum i40e_status_code status;
4360 	u32 reg;
4361 
4362 	struct i40e_aqc_get_set_rss_key_data key_data;
4363 
4364 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4365 	if (!buf) {
4366 		device_printf(dev, "Could not allocate sbuf for output.\n");
4367 		return (ENOMEM);
4368 	}
4369 
4370 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4371 
4372 	sbuf_cat(buf, "\n");
4373 	if (hw->mac.type == I40E_MAC_X722) {
4374 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4375 		if (status)
4376 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4377 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4378 	} else {
4379 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4380 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4381 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4382 		}
4383 	}
4384 
4385 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4386 
4387 	error = sbuf_finish(buf);
4388 	if (error)
4389 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4390 	sbuf_delete(buf);
4391 
4392 	return (error);
4393 }
4394 
4395 static void
4396 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4397 {
4398 	int i, j, k, width;
4399 	char c;
4400 
4401 	if (length < 1 || buf == NULL) return;
4402 
4403 	int byte_stride = 16;
4404 	int lines = length / byte_stride;
4405 	int rem = length % byte_stride;
4406 	if (rem > 0)
4407 		lines++;
4408 
4409 	for (i = 0; i < lines; i++) {
4410 		width = (rem > 0 && i == lines - 1)
4411 		    ? rem : byte_stride;
4412 
4413 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4414 
4415 		for (j = 0; j < width; j++)
4416 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4417 
4418 		if (width < byte_stride) {
4419 			for (k = 0; k < (byte_stride - width); k++)
4420 				sbuf_printf(sb, "   ");
4421 		}
4422 
4423 		if (!text) {
4424 			sbuf_printf(sb, "\n");
4425 			continue;
4426 		}
4427 
4428 		for (j = 0; j < width; j++) {
4429 			c = (char)buf[i * byte_stride + j];
4430 			if (c < 32 || c > 126)
4431 				sbuf_printf(sb, ".");
4432 			else
4433 				sbuf_printf(sb, "%c", c);
4434 
4435 			if (j == width - 1)
4436 				sbuf_printf(sb, "\n");
4437 		}
4438 	}
4439 }
4440 
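/*
 * Sample ixl_sbuf_print_bytes() output line with text = true (the bytes
 * shown are illustrative):
 *
 *	  16 | 68 65 6c 6c 6f 20 77 6f 72 6c 64 00 00 00 00 00 hello world.....
 */
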
4441 static int
4442 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4443 {
4444 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4445 	struct i40e_hw *hw = &pf->hw;
4446 	device_t dev = pf->dev;
4447 	struct sbuf *buf;
4448 	int error = 0;
4449 	enum i40e_status_code status;
4450 	u8 hlut[512];
4451 	u32 reg;
4452 
4453 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4454 	if (!buf) {
4455 		device_printf(dev, "Could not allocate sbuf for output.\n");
4456 		return (ENOMEM);
4457 	}
4458 
4459 	bzero(hlut, sizeof(hlut));
4460 	sbuf_cat(buf, "\n");
4461 	if (hw->mac.type == I40E_MAC_X722) {
4462 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4463 		if (status)
4464 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4465 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4466 	} else {
4467 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4468 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4469 			bcopy(&reg, &hlut[i << 2], 4);
4470 		}
4471 	}
4472 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4473 
4474 	error = sbuf_finish(buf);
4475 	if (error)
4476 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4477 	sbuf_delete(buf);
4478 
4479 	return (error);
4480 }
4481 
4482 static int
4483 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4484 {
4485 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4486 	struct i40e_hw *hw = &pf->hw;
4487 	u64 hena;
4488 
4489 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4490 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4491 
4492 	return sysctl_handle_long(oidp, NULL, hena, req);
4493 }
4494 
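/*
 * The returned value is the raw 64-bit RSS hash-enable bitmask from the
 * PFQF_HENA register pair; each set bit enables hashing of one packet
 * type (the I40E_FILTER_PCTYPE_* values give the bit positions).
 */
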
4495 /*
4496  * Sysctl to disable firmware's link management
4497  *
4498  * 1 - Disable link management on this port
4499  * 0 - Re-enable link management
4500  *
4501  * On normal NVMs, firmware manages link by default.
4502  */
4503 static int
4504 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4505 {
4506 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4507 	struct i40e_hw *hw = &pf->hw;
4508 	device_t dev = pf->dev;
4509 	int requested_mode = -1;
4510 	enum i40e_status_code status = 0;
4511 	int error = 0;
4512 
4513 	/* Read in new mode */
4514 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4515 	if ((error) || (req->newptr == NULL))
4516 		return (error);
4517 	/* Check for sane value */
4518 	if (requested_mode < 0 || requested_mode > 1) {
4519 		device_printf(dev, "Valid modes are 0 or 1\n");
4520 		return (EINVAL);
4521 	}
4522 
4523 	/* Set new mode */
4524 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4525 	if (status) {
4526 		device_printf(dev,
4527 		    "%s: Error setting new phy debug mode %s,"
4528 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4529 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4530 		return (EIO);
4531 	}
4532 
4533 	return (0);
4534 }
4535 
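/*
 * Example usage (the OID name and unit number are assumptions; the
 * registration is not shown in this section):
 *
 *	# sysctl dev.ixl.0.debug.disable_fw_link_management=1
 *
 * Writing 1 sets the disable-link-management bit (bit 4) in the PHY
 * debug command; writing 0 clears it and returns link control to FW.
 */
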
4536 /*
4537  * Read some diagnostic data from an SFP module
4538  * Bytes 96-99, 102-105 from device address 0xA2
4539  */
4540 static int
4541 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4542 {
4543 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4544 	device_t dev = pf->dev;
4545 	struct sbuf *sbuf;
4546 	int error = 0;
4547 	u8 output;
4548 
4549 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4550 	if (error) {
4551 		device_printf(dev, "Error reading from i2c\n");
4552 		return (error);
4553 	}
4554 	if (output != 0x3) {
4555 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4556 		return (EIO);
4557 	}
4558 
4559 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4560 	if (!(output & 0x60)) {
4561 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4562 		return (EIO);
4563 	}
4564 
4565 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4566 
4567 	for (u8 offset = 96; offset < 100; offset++) {
4568 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4569 		sbuf_printf(sbuf, "%02X ", output);
4570 	}
4571 	for (u8 offset = 102; offset < 106; offset++) {
4572 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4573 		sbuf_printf(sbuf, "%02X ", output);
4574 	}
4575 
4576 	sbuf_finish(sbuf);
4577 	sbuf_delete(sbuf);
4578 
4579 	return (0);
4580 }
4581 
4582 /*
4583  * Sysctl to read a byte from I2C bus.
4584  *
4585  * Input: 32-bit value:
4586  * 	bits 0-7:   device address (0xA0 or 0xA2)
4587  * 	bits 8-15:  offset (0-255)
4588  *	bits 16-31: unused
4589  * Output: 8-bit value read
4590  */
4591 static int
4592 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4593 {
4594 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4595 	device_t dev = pf->dev;
4596 	int input = -1, error = 0;
4597 	u8 dev_addr, offset, output;
4598 
4599 	/* Read in I2C read parameters */
4600 	error = sysctl_handle_int(oidp, &input, 0, req);
4601 	if ((error) || (req->newptr == NULL))
4602 		return (error);
4603 	/* Validate device address */
4604 	dev_addr = input & 0xFF;
4605 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4606 		return (EINVAL);
4607 	}
4608 	offset = (input >> 8) & 0xFF;
4609 
4610 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4611 	if (error)
4612 		return (error);
4613 
4614 	device_printf(dev, "%02X\n", output);
4615 	return (0);
4616 }
4617 
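/*
 * Example: read offset 96 (0x60) from device address 0xA2.  The input
 * word is (offset << 8) | dev_addr; the OID is registered as
 * "read_i2c_byte" on the debug list above (full node path assumed):
 *
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 */
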
4618 /*
4619  * Sysctl to write a byte to the I2C bus.
4620  *
4621  * Input: 32-bit value:
4622  * 	bits 0-7:   device address (0xA0 or 0xA2)
4623  * 	bits 8-15:  offset (0-255)
4624  *	bits 16-23: value to write
4625  *	bits 24-31: unused
4626  * Output: 8-bit value written
4627  */
4628 static int
4629 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4630 {
4631 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4632 	device_t dev = pf->dev;
4633 	int input = -1, error = 0;
4634 	u8 dev_addr, offset, value;
4635 
4636 	/* Read in I2C write parameters */
4637 	error = sysctl_handle_int(oidp, &input, 0, req);
4638 	if ((error) || (req->newptr == NULL))
4639 		return (error);
4640 	/* Validate device address */
4641 	dev_addr = input & 0xFF;
4642 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4643 		return (EINVAL);
4644 	}
4645 	offset = (input >> 8) & 0xFF;
4646 	value = (input >> 16) & 0xFF;
4647 
4648 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4649 	if (error)
4650 		return (error);
4651 
4652 	device_printf(dev, "%02X written\n", value);
4653 	return (0);
4654 }
4655 
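/*
 * Example: write 0xAB to offset 96 (0x60) of device address 0xA2.  The
 * input word is (value << 16) | (offset << 8) | dev_addr (full node
 * path assumed, as above):
 *
 *	# sysctl dev.ixl.0.debug.write_i2c_byte=0xAB60A2
 */
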
4656 static int
4657 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4658     u8 bit_pos, int *is_set)
4659 {
4660 	device_t dev = pf->dev;
4661 	struct i40e_hw *hw = &pf->hw;
4662 	enum i40e_status_code status;
4663 
4664 	status = i40e_aq_get_phy_capabilities(hw,
4665 	    FALSE, FALSE, abilities, NULL);
4666 	if (status) {
4667 		device_printf(dev,
4668 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4669 		    __func__, i40e_stat_str(hw, status),
4670 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4671 		return (EIO);
4672 	}
4673 
4674 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4675 	return (0);
4676 }
4677 
4678 static int
4679 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4680     u8 bit_pos, int set)
4681 {
4682 	device_t dev = pf->dev;
4683 	struct i40e_hw *hw = &pf->hw;
4684 	struct i40e_aq_set_phy_config config;
4685 	enum i40e_status_code status;
4686 
4687 	/* Set new PHY config */
4688 	memset(&config, 0, sizeof(config));
4689 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4690 	if (set)
4691 		config.fec_config |= bit_pos;
4692 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4693 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4694 		config.phy_type = abilities->phy_type;
4695 		config.phy_type_ext = abilities->phy_type_ext;
4696 		config.link_speed = abilities->link_speed;
4697 		config.eee_capability = abilities->eee_capability;
4698 		config.eeer = abilities->eeer_val;
4699 		config.low_power_ctrl = abilities->d3_lpan;
4700 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4701 
4702 		if (status) {
4703 			device_printf(dev,
4704 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4705 			    __func__, i40e_stat_str(hw, status),
4706 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4707 			return (EIO);
4708 		}
4709 	}
4710 
4711 	return (0);
4712 }
4713 
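/*
 * The five FEC sysctl handlers below all go through ixl_get_fec_config()
 * and ixl_set_fec_config() with a different I40E_AQ_*FEC* bit each: the
 * ability bits gate which FEC modes may be negotiated, the request bits
 * ask the link partner for a mode, and the auto bit lets FW choose.
 */
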
4714 static int
4715 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4716 {
4717 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4718 	int mode, error = 0;
4719 
4720 	struct i40e_aq_get_phy_abilities_resp abilities;
4721 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4722 	if (error)
4723 		return (error);
4724 	/* Read in new mode */
4725 	error = sysctl_handle_int(oidp, &mode, 0, req);
4726 	if ((error) || (req->newptr == NULL))
4727 		return (error);
4728 
4729 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4730 }
4731 
4732 static int
4733 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4734 {
4735 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4736 	int mode, error = 0;
4737 
4738 	struct i40e_aq_get_phy_abilities_resp abilities;
4739 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4740 	if (error)
4741 		return (error);
4742 	/* Read in new mode */
4743 	error = sysctl_handle_int(oidp, &mode, 0, req);
4744 	if ((error) || (req->newptr == NULL))
4745 		return (error);
4746 
4747 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4748 }
4749 
4750 static int
4751 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4752 {
4753 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4754 	int mode, error = 0;
4755 
4756 	struct i40e_aq_get_phy_abilities_resp abilities;
4757 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4758 	if (error)
4759 		return (error);
4760 	/* Read in new mode */
4761 	error = sysctl_handle_int(oidp, &mode, 0, req);
4762 	if ((error) || (req->newptr == NULL))
4763 		return (error);
4764 
4765 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4766 }
4767 
4768 static int
4769 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4770 {
4771 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4772 	int mode, error = 0;
4773 
4774 	struct i40e_aq_get_phy_abilities_resp abilities;
4775 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4776 	if (error)
4777 		return (error);
4778 	/* Read in new mode */
4779 	error = sysctl_handle_int(oidp, &mode, 0, req);
4780 	if ((error) || (req->newptr == NULL))
4781 		return (error);
4782 
4783 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4784 }
4785 
4786 static int
4787 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4788 {
4789 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4790 	int mode, error = 0;
4791 
4792 	struct i40e_aq_get_phy_abilities_resp abilities;
4793 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4794 	if (error)
4795 		return (error);
4796 	/* Read in new mode */
4797 	error = sysctl_handle_int(oidp, &mode, 0, req);
4798 	if ((error) || (req->newptr == NULL))
4799 		return (error);
4800 
4801 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4802 }
4803 
4804 static int
4805 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4806 {
4807 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4808 	struct i40e_hw *hw = &pf->hw;
4809 	device_t dev = pf->dev;
4810 	struct sbuf *buf;
4811 	int error = 0;
4812 	enum i40e_status_code status;
4813 
4814 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4815 	if (!buf) {
4816 		device_printf(dev, "Could not allocate sbuf for output.\n");
4817 		return (ENOMEM);
4818 	}
4819 
4820 	u8 *final_buff;
4821 	/* This amount is only necessary if reading the entire cluster into memory */
4822 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	/* M_NOWAIT: allocation failure is handled just below */
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
4824 	if (final_buff == NULL) {
4825 		device_printf(dev, "Could not allocate memory for output.\n");
4826 		goto out;
4827 	}
4828 	int final_buff_len = 0;
4829 
4830 	u8 cluster_id = 1;
4831 	bool more = true;
4832 
4833 	u8 dump_buf[4096];
4834 	u16 curr_buff_size = 4096;
4835 	u8 curr_next_table = 0;
4836 	u32 curr_next_index = 0;
4837 
4838 	u16 ret_buff_size;
4839 	u8 ret_next_table;
4840 	u32 ret_next_index;
4841 
4842 	sbuf_cat(buf, "\n");
4843 
4844 	while (more) {
4845 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4846 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4847 		if (status) {
4848 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4849 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4850 			goto free_out;
4851 		}
4852 
4853 		/* copy info out of temp buffer */
4854 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4855 		final_buff_len += ret_buff_size;
4856 
4857 		if (ret_next_table != curr_next_table) {
4858 			/* We're done with the current table; we can dump out read data. */
4859 			sbuf_printf(buf, "%d:", curr_next_table);
4860 			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");
4866 
4867 			/* The entire cluster has been read; we're finished */
4868 			if (ret_next_table == 0xFF)
4869 				break;
4870 
4871 			/* Otherwise clear the output buffer and continue reading */
4872 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4873 			final_buff_len = 0;
4874 		}
4875 
4876 		if (ret_next_index == 0xFFFFFFFF)
4877 			ret_next_index = 0;
4878 
4879 		bzero(dump_buf, sizeof(dump_buf));
4880 		curr_next_table = ret_next_table;
4881 		curr_next_index = ret_next_index;
4882 	}
4883 
4884 free_out:
4885 	free(final_buff, M_DEVBUF);
4886 out:
4887 	error = sbuf_finish(buf);
4888 	if (error)
4889 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4890 	sbuf_delete(buf);
4891 
4892 	return (error);
4893 }
4894 
4895 static int
4896 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4897 {
4898 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4899 	struct i40e_hw *hw = &pf->hw;
4900 	device_t dev = pf->dev;
4901 	int error = 0;
4902 	int state, new_state;
4903 	enum i40e_status_code status;
4904 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4905 
4906 	/* Read in new mode */
4907 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4908 	if ((error) || (req->newptr == NULL))
4909 		return (error);
4910 
4911 	/* Already in requested state */
4912 	if (new_state == state)
4913 		return (error);
4914 
4915 	if (new_state == 0) {
4916 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4917 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4918 			return (EINVAL);
4919 		}
4920 
4921 		if (pf->hw.aq.api_maj_ver < 1 ||
4922 		    (pf->hw.aq.api_maj_ver == 1 &&
4923 		    pf->hw.aq.api_min_ver < 7)) {
4924 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4925 			return (EINVAL);
4926 		}
4927 
4928 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4929 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4930 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4931 	} else {
4932 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4933 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4934 			device_printf(dev, "FW LLDP agent is already running\n");
4935 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4936 	}
4937 
4938 	return (0);
4939 }
4940 
4941 /*
4942  * Get FW LLDP Agent status
4943  */
4944 int
4945 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4946 {
4947 	enum i40e_status_code ret = I40E_SUCCESS;
4948 	struct i40e_lldp_variables lldp_cfg;
4949 	struct i40e_hw *hw = &pf->hw;
4950 	u8 adminstatus = 0;
4951 
4952 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4953 	if (ret)
		return (ret);
4955 
4956 	/* Get the LLDP AdminStatus for the current port */
4957 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4958 	adminstatus &= 0xf;
4959 
4960 	/* Check if LLDP agent is disabled */
4961 	if (!adminstatus) {
4962 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4963 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4964 	} else
4965 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4966 
4967 	return (0);
4968 }
4969 
4970 int
4971 ixl_attach_get_link_status(struct ixl_pf *pf)
4972 {
4973 	struct i40e_hw *hw = &pf->hw;
4974 	device_t dev = pf->dev;
4975 	int error = 0;
4976 
4977 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4978 	    (hw->aq.fw_maj_ver < 4)) {
4979 		i40e_msec_delay(75);
4980 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4981 		if (error) {
4982 			device_printf(dev, "link restart failed, aq_err=%d\n",
4983 			    pf->hw.aq.asq_last_status);
			return (error);
4985 		}
4986 	}
4987 
4988 	/* Determine link state */
4989 	hw->phy.get_link_info = TRUE;
4990 	i40e_get_link_status(hw, &pf->link_up);
4991 	return (0);
4992 }
4993 
4994 static int
4995 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4996 {
4997 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4998 	int requested = 0, error = 0;
4999 
5000 	/* Read in new mode */
5001 	error = sysctl_handle_int(oidp, &requested, 0, req);
5002 	if ((error) || (req->newptr == NULL))
5003 		return (error);
5004 
5005 	/* Initiate the PF reset later in the admin task */
5006 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
5007 
5008 	return (error);
5009 }
5010 
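/*
 * Note: the value written to this handler and the reset handlers below
 * is ignored beyond triggering the sysctl; any successful write
 * requests the reset.
 */
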
5011 static int
5012 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
5013 {
5014 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5015 	struct i40e_hw *hw = &pf->hw;
5016 	int requested = 0, error = 0;
5017 
5018 	/* Read in new mode */
5019 	error = sysctl_handle_int(oidp, &requested, 0, req);
5020 	if ((error) || (req->newptr == NULL))
5021 		return (error);
5022 
5023 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
5024 
5025 	return (error);
5026 }
5027 
5028 static int
5029 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
5030 {
5031 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5032 	struct i40e_hw *hw = &pf->hw;
5033 	int requested = 0, error = 0;
5034 
5035 	/* Read in new mode */
5036 	error = sysctl_handle_int(oidp, &requested, 0, req);
5037 	if ((error) || (req->newptr == NULL))
5038 		return (error);
5039 
5040 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5041 
5042 	return (error);
5043 }
5044 
5045 static int
5046 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5047 {
5048 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5049 	struct i40e_hw *hw = &pf->hw;
5050 	int requested = 0, error = 0;
5051 
5052 	/* Read in new mode */
5053 	error = sysctl_handle_int(oidp, &requested, 0, req);
5054 	if ((error) || (req->newptr == NULL))
5055 		return (error);
5056 
5057 	/* TODO: Find out how to bypass this */
5058 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5059 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5060 		error = EINVAL;
5061 	} else
5062 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5063 
5064 	return (error);
5065 }
5066 
5067 /*
 * Print out the mapping of Tx and Rx queue indexes
 * to MSI-X vectors.
5070  */
5071 static int
5072 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5073 {
5074 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5075 	struct ixl_vsi *vsi = &pf->vsi;
5076 	device_t dev = pf->dev;
5077 	struct sbuf *buf;
5078 	int error = 0;
5079 
5080 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5081 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5082 
5083 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5084 	if (!buf) {
5085 		device_printf(dev, "Could not allocate sbuf for output.\n");
5086 		return (ENOMEM);
5087 	}
5088 
5089 	sbuf_cat(buf, "\n");
5090 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5091 		rx_que = &vsi->rx_queues[i];
5092 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5093 	}
5094 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5095 		tx_que = &vsi->tx_queues[i];
5096 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5097 	}
5098 
5099 	error = sbuf_finish(buf);
5100 	if (error)
5101 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5102 	sbuf_delete(buf);
5103 
5104 	return (error);
5105 }
5106