xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision 2ef9ff7dd34a78a7890ba4d6de64da34d9c10942)
/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

/* Sysctls */
static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static const char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

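	/*
	 * Start with room for 40 capability entries. If the firmware responds
	 * with ENOMEM, it reports the required buffer size in 'needed', so
	 * retry once at that size.
	 */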
	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
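	/*
	 * pf->i2c_access_method: 0 = best available for the hardware (AQ on
	 * XL710 with FW API 1.7+, register access otherwise), 1 = bit-bang,
	 * 2 = register access, 3 = admin queue.
	 */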
	switch (pf->i2c_access_method) {
	case 0: {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == 1 &&
		    hw->aq.api_min_ver >= 7) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case 3:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

#if 0
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		    error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

	/* Re-enable admin queue interrupt */
	if (pf->msix > 1) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

err_out:
	return (error);
#endif
	ixl_rebuild_hw_structs_after_reset(pf);

	/* The PF reset should have cleared any critical errors */
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= IXL_ICR0_CRIT_ERR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

err_out:
	return (error);
}

/*
 * TODO: Make sure this properly handles admin queue / single rx queue intr
 */
int
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;

	// pf->admin_irq++
	++que->irqs;

// TODO: Check against proper field
#if 0
	/* Clear PBA at start of ISR if using legacy interrupts */
	if (pf->msix == 0)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
#endif

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	// TODO: Is intr0 enabled somewhere else?
	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);
	// ixl_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 *
	 * TODO: I think at least ECC error requires a GLOBR, not PFR
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	// TODO: Linux driver never re-enables this interrupt once it has been detected
	// Then what is supposed to happen? A PF reset? Should it never happen?
	// TODO: Parse out this error into something human readable
	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

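	/*
	 * Write the mask back with the handled causes cleared; they stay
	 * disabled until the deferred admin task has dealt with them.
	 */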
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	const char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

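	/* MSI-X vector 0 is reserved for the admin queue, so queue vectors start at 1 */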
	// TODO: See if max is really necessary
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

// TODO: Fix
#if 0
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;
	/* XXX: Assuming only 1 queue in single interrupt mode */
#endif
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pf->pci_mem), pf->pci_mem);
}

void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		/* TODO: Maybe just retry this in a task... */
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	/* Minimum supported link speed */
	return IF_Mbps(100);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  			 like the number of descriptors, buffer size,
 *			 plus we init the rings through this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
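	/* e.g. 8 allocated queues: tc_queues = fls(8) - 1 = 3, i.e. 2^3 = 8 queues for TC 0 */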
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context */
		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

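		/*
		 * Hardware can chain up to rx_buf_chain_len buffers per
		 * frame, so that times the buffer size bounds rxmax below.
		 */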
1306 
1307 		/* Set up an RX context for the HMC */
1308 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1309 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1310 		/* ignore header split for now */
1311 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1312 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1313 		    scctx->isc_max_frame_size : max_rxmax;
1314 		rctx.dtype = 0;
1315 		rctx.dsize = 1;		/* do 32byte descriptors */
1316 		rctx.hsplit_0 = 0;	/* no header split */
1317 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1318 		rctx.qlen = scctx->isc_nrxd[0];
1319 		rctx.tphrdesc_ena = 1;
1320 		rctx.tphwdesc_ena = 1;
1321 		rctx.tphdata_ena = 0;	/* Header Split related */
1322 		rctx.tphhead_ena = 0;	/* Header Split related */
1323 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
1324 		rctx.crcstrip = 1;
1325 		rctx.l2tsel = 1;
1326 		rctx.showiv = 1;	/* Strip inner VLAN header */
1327 		rctx.fc_ena = 0;	/* Disable FCoE */
1328 		rctx.prefena = 1;	/* Prefetch descriptors */
1329 
1330 		err = i40e_clear_lan_rx_queue_context(hw, i);
1331 		if (err) {
1332 			device_printf(dev,
1333 			    "Unable to clear RX context %d\n", i);
1334 			break;
1335 		}
1336 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1337 		if (err) {
1338 			device_printf(dev, "Unable to set RX context %d\n", i);
1339 			break;
1340 		}
1341 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
1342 	}
1343 	return (err);
1344 }
1345 
1346 void
1347 ixl_free_mac_filters(struct ixl_vsi *vsi)
1348 {
1349 	struct ixl_mac_filter *f;
1350 
1351 	while (!SLIST_EMPTY(&vsi->ftl)) {
1352 		f = SLIST_FIRST(&vsi->ftl);
1353 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
1354 		free(f, M_DEVBUF);
1355 	}
1356 }
1357 
1358 /*
1359 ** Provide a update to the queue RX
1360 ** interrupt moderation value.
1361 */
1362 void
1363 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1364 {
1365 	struct ixl_vsi	*vsi = que->vsi;
1366 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1367 	struct i40e_hw	*hw = vsi->hw;
1368 	struct rx_ring	*rxr = &que->rxr;
1369 	u16		rx_itr;
1370 	u16		rx_latency = 0;
1371 	int		rx_bytes;
1372 
1373 	/* Idle, do nothing */
1374 	if (rxr->bytes == 0)
1375 		return;
1376 
1377 	if (pf->dynamic_rx_itr) {
1378 		rx_bytes = rxr->bytes/rxr->itr;
1379 		rx_itr = rxr->itr;
1380 
1381 		/* Adjust latency range */
1382 		switch (rxr->latency) {
1383 		case IXL_LOW_LATENCY:
1384 			if (rx_bytes > 10) {
1385 				rx_latency = IXL_AVE_LATENCY;
1386 				rx_itr = IXL_ITR_20K;
1387 			}
1388 			break;
1389 		case IXL_AVE_LATENCY:
1390 			if (rx_bytes > 20) {
1391 				rx_latency = IXL_BULK_LATENCY;
1392 				rx_itr = IXL_ITR_8K;
1393 			} else if (rx_bytes <= 10) {
1394 				rx_latency = IXL_LOW_LATENCY;
1395 				rx_itr = IXL_ITR_100K;
1396 			}
1397 			break;
1398 		case IXL_BULK_LATENCY:
1399 			if (rx_bytes <= 20) {
1400 				rx_latency = IXL_AVE_LATENCY;
1401 				rx_itr = IXL_ITR_20K;
1402 			}
1403 			break;
1404        		 }
1405 
1406 		rxr->latency = rx_latency;
1407 
1408 		if (rx_itr != rxr->itr) {
1409 			/* do an exponential smoothing */
1410 			rx_itr = (10 * rx_itr * rxr->itr) /
1411 			    ((9 * rx_itr) + rxr->itr);
1412 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
1413 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1414 			    rxr->me), rxr->itr);
1415 		}
1416 	} else { /* We may have have toggled to non-dynamic */
1417 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1418 			vsi->rx_itr_setting = pf->rx_itr;
1419 		/* Update the hardware if needed */
1420 		if (rxr->itr != vsi->rx_itr_setting) {
1421 			rxr->itr = vsi->rx_itr_setting;
1422 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1423 			    rxr->me), rxr->itr);
1424 		}
1425 	}
1426 	rxr->bytes = 0;
1427 	rxr->packets = 0;
1428 }
1429 
1430 
1431 /*
1432 ** Provide a update to the queue TX
1433 ** interrupt moderation value.
1434 */
1435 void
1436 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1437 {
1438 	struct ixl_vsi	*vsi = que->vsi;
1439 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1440 	struct i40e_hw	*hw = vsi->hw;
1441 	struct tx_ring	*txr = &que->txr;
1442 	u16		tx_itr;
1443 	u16		tx_latency = 0;
1444 	int		tx_bytes;
1445 
1446 
1447 	/* Idle, do nothing */
1448 	if (txr->bytes == 0)
1449 		return;
1450 
1451 	if (pf->dynamic_tx_itr) {
1452 		tx_bytes = txr->bytes/txr->itr;
1453 		tx_itr = txr->itr;
1454 
1455 		switch (txr->latency) {
1456 		case IXL_LOW_LATENCY:
1457 			if (tx_bytes > 10) {
1458 				tx_latency = IXL_AVE_LATENCY;
1459 				tx_itr = IXL_ITR_20K;
1460 			}
1461 			break;
1462 		case IXL_AVE_LATENCY:
1463 			if (tx_bytes > 20) {
1464 				tx_latency = IXL_BULK_LATENCY;
1465 				tx_itr = IXL_ITR_8K;
1466 			} else if (tx_bytes <= 10) {
1467 				tx_latency = IXL_LOW_LATENCY;
1468 				tx_itr = IXL_ITR_100K;
1469 			}
1470 			break;
1471 		case IXL_BULK_LATENCY:
1472 			if (tx_bytes <= 20) {
1473 				tx_latency = IXL_AVE_LATENCY;
1474 				tx_itr = IXL_ITR_20K;
1475 			}
1476 			break;
1477 		}
1478 
1479 		txr->latency = tx_latency;
1480 
1481 		if (tx_itr != txr->itr) {
1482        	         /* do an exponential smoothing */
1483 			tx_itr = (10 * tx_itr * txr->itr) /
1484 			    ((9 * tx_itr) + txr->itr);
1485 			txr->itr = min(tx_itr, IXL_MAX_ITR);
1486 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1487 			    txr->me), txr->itr);
1488 		}
1489 
1490 	} else { /* We may have have toggled to non-dynamic */
1491 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1492 			vsi->tx_itr_setting = pf->tx_itr;
1493 		/* Update the hardware if needed */
1494 		if (txr->itr != vsi->tx_itr_setting) {
1495 			txr->itr = vsi->tx_itr_setting;
1496 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1497 			    txr->me), txr->itr);
1498 		}
1499 	}
1500 	txr->bytes = 0;
1501 	txr->packets = 0;
1502 	return;
1503 }
1504 
1505 #ifdef IXL_DEBUG
1506 /**
1507  * ixl_sysctl_qtx_tail_handler
1508  * Retrieves I40E_QTX_TAIL value from hardware
1509  * for a sysctl.
1510  */
1511 int
1512 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1513 {
1514 	struct ixl_tx_queue *tx_que;
1515 	int error;
1516 	u32 val;
1517 
1518 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1519 	if (!tx_que) return 0;
1520 
1521 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1522 	error = sysctl_handle_int(oidp, &val, 0, req);
1523 	if (error || !req->newptr)
1524 		return error;
1525 	return (0);
1526 }
1527 
1528 /**
1529  * ixl_sysctl_qrx_tail_handler
1530  * Retrieves I40E_QRX_TAIL value from hardware
1531  * for a sysctl.
1532  */
1533 int
1534 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1535 {
1536 	struct ixl_rx_queue *rx_que;
1537 	int error;
1538 	u32 val;
1539 
1540 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1541 	if (!rx_que) return 0;
1542 
1543 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1544 	error = sysctl_handle_int(oidp, &val, 0, req);
1545 	if (error || !req->newptr)
1546 		return error;
1547 	return (0);
1548 }
1549 #endif
1550 
1551 /*
1552  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1553  * Writes to the ITR registers immediately.
1554  */
1555 static int
1556 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1557 {
1558 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1559 	device_t dev = pf->dev;
1560 	int error = 0;
1561 	int requested_tx_itr;
1562 
1563 	requested_tx_itr = pf->tx_itr;
1564 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1565 	if ((error) || (req->newptr == NULL))
1566 		return (error);
1567 	if (pf->dynamic_tx_itr) {
1568 		device_printf(dev,
1569 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1570 		    return (EINVAL);
1571 	}
1572 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1573 		device_printf(dev,
1574 		    "Invalid TX itr value; value must be between 0 and %d\n",
1575 		        IXL_MAX_ITR);
1576 		return (EINVAL);
1577 	}
1578 
1579 	pf->tx_itr = requested_tx_itr;
1580 	ixl_configure_tx_itr(pf);
1581 
1582 	return (error);
1583 }
1584 
1585 /*
1586  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1587  * Writes to the ITR registers immediately.
1588  */
1589 static int
1590 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1591 {
1592 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1593 	device_t dev = pf->dev;
1594 	int error = 0;
1595 	int requested_rx_itr;
1596 
1597 	requested_rx_itr = pf->rx_itr;
1598 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1599 	if ((error) || (req->newptr == NULL))
1600 		return (error);
1601 	if (pf->dynamic_rx_itr) {
1602 		device_printf(dev,
1603 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1604 		    return (EINVAL);
1605 	}
1606 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1607 		device_printf(dev,
1608 		    "Invalid RX itr value; value must be between 0 and %d\n",
1609 		        IXL_MAX_ITR);
1610 		return (EINVAL);
1611 	}
1612 
1613 	pf->rx_itr = requested_rx_itr;
1614 	ixl_configure_rx_itr(pf);
1615 
1616 	return (error);
1617 }
1618 
1619 void
1620 ixl_add_hw_stats(struct ixl_pf *pf)
1621 {
1622 	struct ixl_vsi *vsi = &pf->vsi;
1623 	device_t dev = iflib_get_dev(vsi->ctx);
1624 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1625 
1626 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1627 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1628 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1629 
1630 	/* Driver statistics */
1631 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1632 			CTLFLAG_RD, &pf->admin_irq,
1633 			"Admin Queue IRQs received");
1634 
1635 	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1636 
1637 	ixl_add_queues_sysctls(dev, vsi);
1638 
1639 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1640 }
1641 
1642 void
1643 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1644 	struct sysctl_oid_list *child,
1645 	struct i40e_hw_port_stats *stats)
1646 {
1647 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1648 				    CTLFLAG_RD, NULL, "Mac Statistics");
1649 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1650 
1651 	struct i40e_eth_stats *eth_stats = &stats->eth;
1652 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1653 
1654 	struct ixl_sysctl_info ctls[] =
1655 	{
1656 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1657 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1658 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1659 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1660 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1661 		/* Packet Reception Stats */
1662 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1663 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1664 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1665 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1666 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1667 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1668 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1669 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1670 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1671 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1672 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1673 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1674 		/* Packet Transmission Stats */
1675 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1676 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1677 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1678 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1679 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1680 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1681 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1682 		/* Flow control */
1683 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1684 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1685 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1686 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1687 		/* End */
1688 		{0,0,0}
1689 	};
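	/*
	 * The ctls table above is sentinel-terminated: to export another
	 * MAC counter, add an entry before the final {0,0,0} row and the
	 * loop below picks it up automatically.
	 */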
1690 
1691 	struct ixl_sysctl_info *entry = ctls;
1692 	while (entry->stat != 0) {
1694 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1695 				CTLFLAG_RD, entry->stat,
1696 				entry->description);
1697 		entry++;
1698 	}
1699 }
1700 
1701 void
1702 ixl_set_rss_key(struct ixl_pf *pf)
1703 {
1704 	struct i40e_hw *hw = &pf->hw;
1705 	struct ixl_vsi *vsi = &pf->vsi;
1706 	device_t	dev = pf->dev;
1707 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1708 	enum i40e_status_code status;
1709 
1710 #ifdef RSS
1711 	/* Fetch the configured RSS key */
1712 	rss_getkey((uint8_t *)&rss_seed);
1713 #else
1714 	ixl_get_default_rss_key(rss_seed);
1715 #endif
1716 	/* Fill out hash function seed */
1717 	if (hw->mac.type == I40E_MAC_X722) {
1718 		struct i40e_aqc_get_set_rss_key_data key_data;
1719 		bcopy(rss_seed, &key_data, sizeof(key_data)); /* 52 bytes */
1720 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1721 		if (status)
1722 			device_printf(dev,
1723 			    "i40e_aq_set_rss_key status %s, error %s\n",
1724 			    i40e_stat_str(hw, status),
1725 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1726 	} else {
1727 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1728 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1729 	}
1730 }
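/*
 * A sketch of how the configured key can be checked from userland,
 * assuming interface unit 0; the debug sysctl (hidden from "sysctl -a")
 * is registered in ixl_add_device_sysctls() below:
 *
 *     sysctl dev.ixl.0.debug.rss_key
 */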
1731 
1732 /*
1733  * Configure enabled PCTYPES for RSS.
1734  */
1735 void
1736 ixl_set_rss_pctypes(struct ixl_pf *pf)
1737 {
1738 	struct i40e_hw *hw = &pf->hw;
1739 	u64		set_hena = 0, hena;
1740 
1741 #ifdef RSS
1742 	u32		rss_hash_config;
1743 
1744 	rss_hash_config = rss_gethashconfig();
1745 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1746 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1747 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1748 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1749 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1750 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1751 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1752 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1753 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1754 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1755 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1756 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1757 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1758 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1759 #else
1760 	if (hw->mac.type == I40E_MAC_X722)
1761 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1762 	else
1763 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1764 #endif
1765 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1766 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1767 	hena |= set_hena;
1768 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1769 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1770 
1771 }
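/*
 * Illustration of the read-modify-write above, with made-up register
 * values: if HENA(0)/HENA(1) read back 0x00000000/0x00000004 (only bit
 * 34 of the combined 64-bit mask set) and set_hena enables bit 31, then
 * hena = 0x0000000480000000; the low word (0x80000000) is written back
 * to HENA(0) and the high word (0x00000004) to HENA(1).
 */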
1772 
1773 void
1774 ixl_set_rss_hlut(struct ixl_pf *pf)
1775 {
1776 	struct i40e_hw	*hw = &pf->hw;
1777 	struct ixl_vsi *vsi = &pf->vsi;
1778 	device_t	dev = iflib_get_dev(vsi->ctx);
1779 	int		i, que_id;
1780 	int		lut_entry_width;
1781 	u32		lut = 0;
1782 	enum i40e_status_code status;
1783 
1784 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1785 
1786 	/* Populate the LUT across the RX queues in round-robin fashion */
1787 	u8 hlut_buf[512];
1788 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1789 #ifdef RSS
1790 		/*
1791 		 * Fetch the RSS bucket id for the given indirection entry.
1792 		 * Cap it at the number of configured buckets (which is
1793 		 * num_queues.)
1794 		 */
1795 		que_id = rss_get_indirection_to_bucket(i);
1796 		que_id = que_id % vsi->num_rx_queues;
1797 #else
1798 		que_id = i % vsi->num_rx_queues;
1799 #endif
1800 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1801 		hlut_buf[i] = lut;
1802 	}
1803 
1804 	if (hw->mac.type == I40E_MAC_X722) {
1805 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1806 		if (status)
1807 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1808 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1809 	} else {
1810 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1811 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1812 		ixl_flush(hw);
1813 	}
1814 }
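/*
 * Worked example of the LUT fill above: with rss_table_size = 512 and
 * 8 RX queues, entries cycle 0,1,...,7,0,1,... (hlut_buf[i] = i % 8),
 * and lut_entry_width masks each value down to the width the hardware
 * actually supports.
 */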
1815 
1816 /*
1817 ** Setup the PF's RSS parameters.
1818 */
1819 void
1820 ixl_config_rss(struct ixl_pf *pf)
1821 {
1822 	ixl_set_rss_key(pf);
1823 	ixl_set_rss_pctypes(pf);
1824 	ixl_set_rss_hlut(pf);
1825 }
1826 
1827 /*
1828 ** This routine updates VLAN filters. It is called by init
1829 ** after a soft reset; it scans the filter table and then
1830 ** updates the hw with the entries found there.
1831 */
1832 void
1833 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1834 {
1835 	struct ixl_mac_filter	*f;
1836 	int			cnt = 0, flags;
1837 
1838 	if (vsi->num_vlans == 0)
1839 		return;
1840 	/*
1841 	** Scan the filter list for vlan entries,
1842 	** mark them for addition and then call
1843 	** for the AQ update.
1844 	*/
1845 	SLIST_FOREACH(f, &vsi->ftl, next) {
1846 		if (f->flags & IXL_FILTER_VLAN) {
1847 			f->flags |=
1848 			    (IXL_FILTER_ADD |
1849 			    IXL_FILTER_USED);
1850 			cnt++;
1851 		}
1852 	}
1853 	if (cnt == 0) {
1854 		printf("setup vlan: no filters found!\n");
1855 		return;
1856 	}
1857 	flags = IXL_FILTER_VLAN;
1858 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1859 	ixl_add_hw_filters(vsi, flags, cnt);
1860 }
1861 
1862 /*
1863  * In some firmware versions a default MAC/VLAN filter is
1864  * configured which interferes with the filters managed by the driver.
1865  * Make sure it's removed.
1866  */
1867 void
1868 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1869 {
1870 	struct i40e_aqc_remove_macvlan_element_data e;
1871 
1872 	bzero(&e, sizeof(e));
1873 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1874 	e.vlan_tag = 0;
1875 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1876 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1877 
1878 	bzero(&e, sizeof(e));
1879 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1880 	e.vlan_tag = 0;
1881 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1882 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1883 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1884 }
1885 
1886 /*
1887 ** Initialize filter list and add filters that the hardware
1888 ** needs to know about.
1889 **
1890 ** Requires VSI's filter list & seid to be set before calling.
1891 */
1892 void
1893 ixl_init_filters(struct ixl_vsi *vsi)
1894 {
1895 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1896 
1897 	/* Initialize mac filter list for VSI */
1898 	SLIST_INIT(&vsi->ftl);
1899 
1900 	/* Receive broadcast Ethernet frames */
1901 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1902 
1903 	ixl_del_default_hw_filters(vsi);
1904 
1905 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1906 	/*
1907 	 * Prevent Tx flow control frames from being sent out by
1908 	 * non-firmware transmitters.
1909 	 * This affects every VSI in the PF.
1910 	 */
1911 	if (pf->enable_tx_fc_filter)
1912 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1913 }
1914 
1915 /*
1916 ** This routine adds multicast filters.
1917 */
1918 void
1919 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1920 {
1921 	struct ixl_mac_filter *f;
1922 
1923 	/* Does one already exist */
1924 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1925 	if (f != NULL)
1926 		return;
1927 
1928 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1929 	if (f != NULL)
1930 		f->flags |= IXL_FILTER_MC;
1931 	else
1932 		printf("WARNING: no filter available!!\n");
1933 }
1934 
1935 void
1936 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1937 {
1938 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1939 }
1940 
1941 /*
1942  * This routine adds a MAC/VLAN filter to the software filter
1943  * list, then adds that new filter to the HW if it doesn't already
1944  * exist in the SW filter list.
1945  */
1946 void
1947 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1948 {
1949 	struct ixl_mac_filter	*f, *tmp;
1950 	struct ixl_pf		*pf;
1951 	device_t		dev;
1952 
1953 	DEBUGOUT("ixl_add_filter: begin");
1954 
1955 	pf = vsi->back;
1956 	dev = pf->dev;
1957 
1958 	/* Does one already exist */
1959 	f = ixl_find_filter(vsi, macaddr, vlan);
1960 	if (f != NULL)
1961 		return;
1962 	/*
1963 	** If this is the first vlan being registered, remove the
1964 	** ANY filter that indicates we are not in a vlan, and
1965 	** replace it with a vlan-0 filter.
1966 	*/
1967 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1968 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1969 		if (tmp != NULL) {
1970 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1971 			ixl_add_filter(vsi, macaddr, 0);
1972 		}
1973 	}
1974 
1975 	f = ixl_new_filter(vsi, macaddr, vlan);
1976 	if (f == NULL) {
1977 		device_printf(dev, "WARNING: no filter available!!\n");
1978 		return;
1979 	}
1980 	if (f->vlan != IXL_VLAN_ANY)
1981 		f->flags |= IXL_FILTER_VLAN;
1982 	else
1983 		vsi->num_macs++;
1984 
1985 	f->flags |= IXL_FILTER_USED;
1986 	ixl_add_hw_filters(vsi, f->flags, 1);
1987 }
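/*
 * Example of the ANY -> 0 swap above: with only (MAC, IXL_VLAN_ANY)
 * installed, registering the first VLAN (say VID 100) replaces it with
 * (MAC, 0) and then installs (MAC, 100), so untagged traffic keeps
 * flowing alongside the newly tagged stream.
 */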
1988 
1989 void
1990 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1991 {
1992 	struct ixl_mac_filter *f;
1993 
1994 	f = ixl_find_filter(vsi, macaddr, vlan);
1995 	if (f == NULL)
1996 		return;
1997 
1998 	f->flags |= IXL_FILTER_DEL;
	/* Do the accounting first; ixl_del_hw_filters() frees the filter */
1999 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2000 		vsi->num_macs--;
2001 	ixl_del_hw_filters(vsi, 1);
2002 
2003 	/* Check if this is the last vlan removal */
2004 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2005 		/* Switch back to a non-vlan filter */
2006 		ixl_del_filter(vsi, macaddr, 0);
2007 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2008 	}
2009 	return;
2010 }
2011 
2012 /*
2013 ** Find the filter with both matching mac addr and vlan id
2014 */
2015 struct ixl_mac_filter *
2016 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2017 {
2018 	struct ixl_mac_filter	*f;
2019 
2020 	SLIST_FOREACH(f, &vsi->ftl, next) {
2021 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2022 		    && (f->vlan == vlan)) {
2023 			return (f);
2024 		}
2025 	}
2026 
2027 	return (NULL);
2028 }
2029 
2030 /*
2031 ** This routine takes additions to the vsi filter
2032 ** table and creates an Admin Queue call to create
2033 ** the filters in the hardware.
2034 */
2035 void
2036 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2037 {
2038 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2039 	struct ixl_mac_filter	*f;
2040 	struct ixl_pf		*pf;
2041 	struct i40e_hw		*hw;
2042 	device_t		dev;
2043 	enum i40e_status_code	status;
2044 	int			j = 0;
2045 
2046 	pf = vsi->back;
2047 	dev = vsi->dev;
2048 	hw = &pf->hw;
2049 
2050 	if (cnt < 1) {
2051 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt < 1\n");
2052 		return;
2053 	}
2054 
2055 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2056 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2057 	if (a == NULL) {
2058 		device_printf(dev, "add_hw_filters failed to get memory\n");
2059 		return;
2060 	}
2061 
2062 	/*
2063 	** Scan the filter list; each time we find a matching
2064 	** filter, add it to the admin queue array and turn off
2065 	** the add bit.
2066 	*/
2067 	SLIST_FOREACH(f, &vsi->ftl, next) {
2068 		if ((f->flags & flags) == flags) {
2069 			b = &a[j]; // a pox on fvl long names :)
2070 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2071 			if (f->vlan == IXL_VLAN_ANY) {
2072 				b->vlan_tag = 0;
2073 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2074 			} else {
2075 				b->vlan_tag = f->vlan;
2076 				b->flags = 0;
2077 			}
2078 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2079 			f->flags &= ~IXL_FILTER_ADD;
2080 			j++;
2081 
2082 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2083 			    MAC_FORMAT_ARGS(f->macaddr));
2084 		}
2085 		if (j == cnt)
2086 			break;
2087 	}
2088 	if (j > 0) {
2089 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2090 		if (status)
2091 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2092 			    "error %s\n", i40e_stat_str(hw, status),
2093 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2094 		else
2095 			vsi->hw_filters_add += j;
2096 	}
2097 	free(a, M_DEVBUF);
2098 	return;
2099 }
2100 
2101 /*
2102 ** This routine takes removals in the vsi filter
2103 ** table and creates an Admin Queue call to delete
2104 ** the filters in the hardware.
2105 */
2106 void
2107 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2108 {
2109 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2110 	struct ixl_pf		*pf;
2111 	struct i40e_hw		*hw;
2112 	device_t		dev;
2113 	struct ixl_mac_filter	*f, *f_temp;
2114 	enum i40e_status_code	status;
2115 	int			j = 0;
2116 
2117 	pf = vsi->back;
2118 	hw = &pf->hw;
2119 	dev = vsi->dev;
2120 
2121 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2122 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2123 	if (d == NULL) {
2124 		device_printf(dev, "%s: failed to get memory\n", __func__);
2125 		return;
2126 	}
2127 
2128 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2129 		if (f->flags & IXL_FILTER_DEL) {
2130 			e = &d[j]; // a pox on fvl long names :)
2131 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2132 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2133 			if (f->vlan == IXL_VLAN_ANY) {
2134 				e->vlan_tag = 0;
2135 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2136 			} else {
2137 				e->vlan_tag = f->vlan;
2138 			}
2139 
2140 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2141 			    MAC_FORMAT_ARGS(f->macaddr));
2142 
2143 			/* delete entry from vsi list */
2144 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2145 			free(f, M_DEVBUF);
2146 			j++;
2147 		}
2148 		if (j == cnt)
2149 			break;
2150 	}
2151 	if (j > 0) {
2152 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2153 		if (status) {
2154 			int sc = 0;
2155 			for (int i = 0; i < j; i++)
2156 				sc += (!d[i].error_code); /* counts successes */
2157 			vsi->hw_filters_del += sc;
2158 			device_printf(dev,
2159 			    "Failed to remove %d/%d filters, error %s\n",
2160 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2161 		} else
2162 			vsi->hw_filters_del += j;
2163 	}
2164 	free(d, M_DEVBUF);
2165 	return;
2166 }
2167 
2168 int
2169 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2170 {
2171 	struct i40e_hw	*hw = &pf->hw;
2172 	int		error = 0;
2173 	u32		reg;
2174 	u16		pf_qidx;
2175 
2176 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2177 
2178 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2179 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2180 	    pf_qidx, vsi_qidx);
2181 
2182 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2183 
2184 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2185 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2186 	    I40E_QTX_ENA_QENA_STAT_MASK;
2187 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2188 	/* Verify the enable took */
2189 	for (int j = 0; j < 10; j++) {
2190 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2191 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2192 			break;
2193 		i40e_usec_delay(10);
2194 	}
2195 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2196 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2197 		    pf_qidx);
2198 		error = ETIMEDOUT;
2199 	}
2200 
2201 	return (error);
2202 }
2203 
2204 int
2205 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2206 {
2207 	struct i40e_hw	*hw = &pf->hw;
2208 	int		error = 0;
2209 	u32		reg;
2210 	u16		pf_qidx;
2211 
2212 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2213 
2214 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2215 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2216 	    pf_qidx, vsi_qidx);
2217 
2218 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2219 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2220 	    I40E_QRX_ENA_QENA_STAT_MASK;
2221 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2222 	/* Verify the enable took */
2223 	for (int j = 0; j < 10; j++) {
2224 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2225 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2226 			break;
2227 		i40e_usec_delay(10);
2228 	}
2229 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2230 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2231 		    pf_qidx);
2232 		error = ETIMEDOUT;
2233 	}
2234 
2235 	return (error);
2236 }
2237 
2238 int
2239 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2240 {
2241 	int error = 0;
2242 
2243 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2244 	/* Called function already prints error message */
2245 	if (error)
2246 		return (error);
2247 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2248 	return (error);
2249 }
2250 
2251 /* For PF VSI only */
2252 int
2253 ixl_enable_rings(struct ixl_vsi *vsi)
2254 {
2255 	struct ixl_pf	*pf = vsi->back;
2256 	int		error = 0;
2257 
2258 	for (int i = 0; i < vsi->num_tx_queues; i++)
2259 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2260 
2261 	for (int i = 0; i < vsi->num_rx_queues; i++)
2262 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2263 
2264 	return (error);
2265 }
2266 
2267 /*
2268  * Returns error on first ring that is detected hung.
2269  */
2270 int
2271 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2272 {
2273 	struct i40e_hw	*hw = &pf->hw;
2274 	int		error = 0;
2275 	u32		reg;
2276 	u16		pf_qidx;
2277 
2278 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2279 
2280 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2281 	i40e_usec_delay(500);
2282 
2283 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2284 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2285 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2286 	/* Verify the disable took */
2287 	for (int j = 0; j < 10; j++) {
2288 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2289 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2290 			break;
2291 		i40e_msec_delay(10);
2292 	}
2293 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2294 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2295 		    pf_qidx);
2296 		error = ETIMEDOUT;
2297 	}
2298 
2299 	return (error);
2300 }
2301 
2302 /*
2303  * Returns error on first ring that is detected hung.
2304  */
2305 int
2306 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2307 {
2308 	struct i40e_hw	*hw = &pf->hw;
2309 	int		error = 0;
2310 	u32		reg;
2311 	u16		pf_qidx;
2312 
2313 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2314 
2315 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2316 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2317 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2318 	/* Verify the disable took */
2319 	for (int j = 0; j < 10; j++) {
2320 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2321 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2322 			break;
2323 		i40e_msec_delay(10);
2324 	}
2325 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2326 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2327 		    pf_qidx);
2328 		error = ETIMEDOUT;
2329 	}
2330 
2331 	return (error);
2332 }
2333 
2334 int
2335 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2336 {
2337 	int error = 0;
2338 
2339 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2340 	/* Called function already prints error message */
2341 	if (error)
2342 		return (error);
2343 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2344 	return (error);
2345 }
2346 
2347 int
2348 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2349 {
2350 	int error = 0;
2351 
2352 	for (int i = 0; i < vsi->num_tx_queues; i++)
2353 		error = ixl_disable_tx_ring(pf, qtag, i);
2354 
2355 	for (int i = 0; i < vsi->num_rx_queues; i++)
2356 		error = ixl_disable_rx_ring(pf, qtag, i);
2357 
2358 	return (error);
2359 }
2360 
2361 static void
2362 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2363 {
2364 	struct i40e_hw *hw = &pf->hw;
2365 	device_t dev = pf->dev;
2366 	struct ixl_vf *vf;
2367 	bool mdd_detected = false;
2368 	bool pf_mdd_detected = false;
2369 	bool vf_mdd_detected = false;
2370 	u16 vf_num, queue;
2371 	u8 pf_num, event;
2372 	u8 pf_mdet_num, vp_mdet_num;
2373 	u32 reg;
2374 
2375 	/* find what triggered the MDD event */
2376 	reg = rd32(hw, I40E_GL_MDET_TX);
2377 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2378 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2379 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2380 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2381 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2382 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2383 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2384 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2385 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2386 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2387 		mdd_detected = true;
2388 	}
2389 
2390 	if (!mdd_detected)
2391 		return;
2392 
2393 	reg = rd32(hw, I40E_PF_MDET_TX);
2394 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2395 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2396 		pf_mdet_num = hw->pf_id;
2397 		pf_mdd_detected = true;
2398 	}
2399 
2400 	/* Check if MDD was caused by a VF */
2401 	for (int i = 0; i < pf->num_vfs; i++) {
2402 		vf = &(pf->vfs[i]);
2403 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2404 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2405 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2406 			vp_mdet_num = i;
2407 			vf->num_mdd_events++;
2408 			vf_mdd_detected = true;
2409 		}
2410 	}
2411 
2412 	/* Print out an error message */
2413 	if (vf_mdd_detected && pf_mdd_detected)
2414 		device_printf(dev,
2415 		    "Malicious Driver Detection event %d"
2416 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2417 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2418 	else if (vf_mdd_detected && !pf_mdd_detected)
2419 		device_printf(dev,
2420 		    "Malicious Driver Detection event %d"
2421 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2422 		    event, queue, pf_num, vf_num, vp_mdet_num);
2423 	else if (!vf_mdd_detected && pf_mdd_detected)
2424 		device_printf(dev,
2425 		    "Malicious Driver Detection event %d"
2426 		    " on TX queue %d, pf number %d (PF-%d)\n",
2427 		    event, queue, pf_num, pf_mdet_num);
2428 	/* Theoretically shouldn't happen */
2429 	else
2430 		device_printf(dev,
2431 		    "TX Malicious Driver Detection event (unknown)\n");
2432 }
2433 
2434 static void
2435 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2436 {
2437 	struct i40e_hw *hw = &pf->hw;
2438 	device_t dev = pf->dev;
2439 	struct ixl_vf *vf;
2440 	bool mdd_detected = false;
2441 	bool pf_mdd_detected = false;
2442 	bool vf_mdd_detected = false;
2443 	u16 queue;
2444 	u8 pf_num, event;
2445 	u8 pf_mdet_num, vp_mdet_num;
2446 	u32 reg;
2447 
2448 	/*
2449 	 * GL_MDET_RX doesn't contain VF number information, unlike
2450 	 * GL_MDET_TX.
2451 	 */
2452 	reg = rd32(hw, I40E_GL_MDET_RX);
2453 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2454 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2455 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2456 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2457 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2458 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2459 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2460 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2461 		mdd_detected = true;
2462 	}
2463 
2464 	if (!mdd_detected)
2465 		return;
2466 
2467 	reg = rd32(hw, I40E_PF_MDET_RX);
2468 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2469 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2470 		pf_mdet_num = hw->pf_id;
2471 		pf_mdd_detected = true;
2472 	}
2473 
2474 	/* Check if MDD was caused by a VF */
2475 	for (int i = 0; i < pf->num_vfs; i++) {
2476 		vf = &(pf->vfs[i]);
2477 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2478 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2479 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2480 			vp_mdet_num = i;
2481 			vf->num_mdd_events++;
2482 			vf_mdd_detected = true;
2483 		}
2484 	}
2485 
2486 	/* Print out an error message */
2487 	if (vf_mdd_detected && pf_mdd_detected)
2488 		device_printf(dev,
2489 		    "Malicious Driver Detection event %d"
2490 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2491 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2492 	else if (vf_mdd_detected && !pf_mdd_detected)
2493 		device_printf(dev,
2494 		    "Malicious Driver Detection event %d"
2495 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2496 		    event, queue, pf_num, vp_mdet_num);
2497 	else if (!vf_mdd_detected && pf_mdd_detected)
2498 		device_printf(dev,
2499 		    "Malicious Driver Detection event %d"
2500 		    " on RX queue %d, pf number %d (PF-%d)\n",
2501 		    event, queue, pf_num, pf_mdet_num);
2502 	/* Theoretically shouldn't happen */
2503 	else
2504 		device_printf(dev,
2505 		    "RX Malicious Driver Detection event (unknown)\n");
2506 }
2507 
2508 /**
2509  * ixl_handle_mdd_event
2510  *
2511  * Called from the interrupt handler to identify possibly malicious VFs
2512  * (it also detects events from the PF)
2513  **/
2514 void
2515 ixl_handle_mdd_event(struct ixl_pf *pf)
2516 {
2517 	struct i40e_hw *hw = &pf->hw;
2518 	u32 reg;
2519 
2520 	/*
2521 	 * Handle both TX/RX because it's possible they could
2522 	 * both trigger in the same interrupt.
2523 	 */
2524 	ixl_handle_tx_mdd_event(pf);
2525 	ixl_handle_rx_mdd_event(pf);
2526 
2527 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2528 
2529 	/* re-enable mdd interrupt cause */
2530 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2531 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2532 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2533 	ixl_flush(hw);
2534 }
2535 
2536 void
2537 ixl_enable_intr(struct ixl_vsi *vsi)
2538 {
2539 	struct i40e_hw		*hw = vsi->hw;
2540 	struct ixl_rx_queue	*que = vsi->rx_queues;
2541 
2542 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2543 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 			ixl_enable_queue(hw, que->rxr.me);
2545 	} else
2546 		ixl_enable_intr0(hw);
2547 }
2548 
2549 void
2550 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2551 {
2552 	struct i40e_hw		*hw = vsi->hw;
2553 	struct ixl_rx_queue	*que = vsi->rx_queues;
2554 
2555 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2556 		ixl_disable_queue(hw, que->rxr.me);
2557 }
2558 
2559 void
2560 ixl_enable_intr0(struct i40e_hw *hw)
2561 {
2562 	u32		reg;
2563 
2564 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2565 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2566 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2567 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2568 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2569 }
2570 
2571 void
2572 ixl_disable_intr0(struct i40e_hw *hw)
2573 {
2574 	u32		reg;
2575 
2576 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2577 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2578 	ixl_flush(hw);
2579 }
2580 
2581 void
2582 ixl_enable_queue(struct i40e_hw *hw, int id)
2583 {
2584 	u32		reg;
2585 
2586 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2587 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2588 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2589 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2590 }
2591 
2592 void
2593 ixl_disable_queue(struct i40e_hw *hw, int id)
2594 {
2595 	u32		reg;
2596 
2597 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2598 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2599 }
2600 
2601 void
2602 ixl_update_stats_counters(struct ixl_pf *pf)
2603 {
2604 	struct i40e_hw	*hw = &pf->hw;
2605 	struct ixl_vsi	*vsi = &pf->vsi;
2606 	struct ixl_vf	*vf;
2607 
2608 	struct i40e_hw_port_stats *nsd = &pf->stats;
2609 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2610 
2611 	/* Update hw stats */
2612 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2613 			   pf->stat_offsets_loaded,
2614 			   &osd->crc_errors, &nsd->crc_errors);
2615 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2616 			   pf->stat_offsets_loaded,
2617 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2618 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2619 			   I40E_GLPRT_GORCL(hw->port),
2620 			   pf->stat_offsets_loaded,
2621 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2622 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2623 			   I40E_GLPRT_GOTCL(hw->port),
2624 			   pf->stat_offsets_loaded,
2625 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2626 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2627 			   pf->stat_offsets_loaded,
2628 			   &osd->eth.rx_discards,
2629 			   &nsd->eth.rx_discards);
2630 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2631 			   I40E_GLPRT_UPRCL(hw->port),
2632 			   pf->stat_offsets_loaded,
2633 			   &osd->eth.rx_unicast,
2634 			   &nsd->eth.rx_unicast);
2635 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2636 			   I40E_GLPRT_UPTCL(hw->port),
2637 			   pf->stat_offsets_loaded,
2638 			   &osd->eth.tx_unicast,
2639 			   &nsd->eth.tx_unicast);
2640 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2641 			   I40E_GLPRT_MPRCL(hw->port),
2642 			   pf->stat_offsets_loaded,
2643 			   &osd->eth.rx_multicast,
2644 			   &nsd->eth.rx_multicast);
2645 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2646 			   I40E_GLPRT_MPTCL(hw->port),
2647 			   pf->stat_offsets_loaded,
2648 			   &osd->eth.tx_multicast,
2649 			   &nsd->eth.tx_multicast);
2650 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2651 			   I40E_GLPRT_BPRCL(hw->port),
2652 			   pf->stat_offsets_loaded,
2653 			   &osd->eth.rx_broadcast,
2654 			   &nsd->eth.rx_broadcast);
2655 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2656 			   I40E_GLPRT_BPTCL(hw->port),
2657 			   pf->stat_offsets_loaded,
2658 			   &osd->eth.tx_broadcast,
2659 			   &nsd->eth.tx_broadcast);
2660 
2661 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2662 			   pf->stat_offsets_loaded,
2663 			   &osd->tx_dropped_link_down,
2664 			   &nsd->tx_dropped_link_down);
2665 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2666 			   pf->stat_offsets_loaded,
2667 			   &osd->mac_local_faults,
2668 			   &nsd->mac_local_faults);
2669 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2670 			   pf->stat_offsets_loaded,
2671 			   &osd->mac_remote_faults,
2672 			   &nsd->mac_remote_faults);
2673 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2674 			   pf->stat_offsets_loaded,
2675 			   &osd->rx_length_errors,
2676 			   &nsd->rx_length_errors);
2677 
2678 	/* Flow control (LFC) stats */
2679 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2680 			   pf->stat_offsets_loaded,
2681 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2682 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2683 			   pf->stat_offsets_loaded,
2684 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2685 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2686 			   pf->stat_offsets_loaded,
2687 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2688 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2689 			   pf->stat_offsets_loaded,
2690 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2691 
2692 	/* Packet size stats rx */
2693 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2694 			   I40E_GLPRT_PRC64L(hw->port),
2695 			   pf->stat_offsets_loaded,
2696 			   &osd->rx_size_64, &nsd->rx_size_64);
2697 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2698 			   I40E_GLPRT_PRC127L(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->rx_size_127, &nsd->rx_size_127);
2701 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2702 			   I40E_GLPRT_PRC255L(hw->port),
2703 			   pf->stat_offsets_loaded,
2704 			   &osd->rx_size_255, &nsd->rx_size_255);
2705 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2706 			   I40E_GLPRT_PRC511L(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->rx_size_511, &nsd->rx_size_511);
2709 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2710 			   I40E_GLPRT_PRC1023L(hw->port),
2711 			   pf->stat_offsets_loaded,
2712 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2713 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2714 			   I40E_GLPRT_PRC1522L(hw->port),
2715 			   pf->stat_offsets_loaded,
2716 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2717 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2718 			   I40E_GLPRT_PRC9522L(hw->port),
2719 			   pf->stat_offsets_loaded,
2720 			   &osd->rx_size_big, &nsd->rx_size_big);
2721 
2722 	/* Packet size stats tx */
2723 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2724 			   I40E_GLPRT_PTC64L(hw->port),
2725 			   pf->stat_offsets_loaded,
2726 			   &osd->tx_size_64, &nsd->tx_size_64);
2727 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2728 			   I40E_GLPRT_PTC127L(hw->port),
2729 			   pf->stat_offsets_loaded,
2730 			   &osd->tx_size_127, &nsd->tx_size_127);
2731 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2732 			   I40E_GLPRT_PTC255L(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->tx_size_255, &nsd->tx_size_255);
2735 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2736 			   I40E_GLPRT_PTC511L(hw->port),
2737 			   pf->stat_offsets_loaded,
2738 			   &osd->tx_size_511, &nsd->tx_size_511);
2739 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2740 			   I40E_GLPRT_PTC1023L(hw->port),
2741 			   pf->stat_offsets_loaded,
2742 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2743 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2744 			   I40E_GLPRT_PTC1522L(hw->port),
2745 			   pf->stat_offsets_loaded,
2746 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2747 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2748 			   I40E_GLPRT_PTC9522L(hw->port),
2749 			   pf->stat_offsets_loaded,
2750 			   &osd->tx_size_big, &nsd->tx_size_big);
2751 
2752 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2753 			   pf->stat_offsets_loaded,
2754 			   &osd->rx_undersize, &nsd->rx_undersize);
2755 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2756 			   pf->stat_offsets_loaded,
2757 			   &osd->rx_fragments, &nsd->rx_fragments);
2758 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2759 			   pf->stat_offsets_loaded,
2760 			   &osd->rx_oversize, &nsd->rx_oversize);
2761 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2762 			   pf->stat_offsets_loaded,
2763 			   &osd->rx_jabber, &nsd->rx_jabber);
2764 	pf->stat_offsets_loaded = true;
2765 	/* End hw stats */
2766 
2767 	/* Update vsi stats */
2768 	ixl_update_vsi_stats(vsi);
2769 
2770 	for (int i = 0; i < pf->num_vfs; i++) {
2771 		vf = &pf->vfs[i];
2772 		if (vf->vf_flags & VF_FLAG_ENABLED)
2773 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2774 	}
2775 }
2776 
2777 int
2778 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2779 {
2780 	struct i40e_hw *hw = &pf->hw;
2781 	device_t dev = pf->dev;
2782 	int error = 0;
2783 
2784 	error = i40e_shutdown_lan_hmc(hw);
2785 	if (error)
2786 		device_printf(dev,
2787 		    "Shutdown LAN HMC failed with code %d\n", error);
2788 
2789 	ixl_disable_intr0(hw);
2790 
2791 	error = i40e_shutdown_adminq(hw);
2792 	if (error)
2793 		device_printf(dev,
2794 		    "Shutdown Admin queue failed with code %d\n", error);
2795 
2796 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2797 	return (error);
2798 }
2799 
2800 int
2801 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2802 {
2803 	struct i40e_hw *hw = &pf->hw;
2804 	struct ixl_vsi *vsi = &pf->vsi;
2805 	device_t dev = pf->dev;
2806 	int error = 0;
2807 
2808 	device_printf(dev, "Rebuilding driver state...\n");
2809 
2810 	error = i40e_pf_reset(hw);
2811 	if (error) {
2812 		device_printf(dev, "PF reset failure %s\n",
2813 		    i40e_stat_str(hw, error));
2814 		goto ixl_rebuild_hw_structs_after_reset_err;
2815 	}
2816 
2817 	/* Setup */
2818 	error = i40e_init_adminq(hw);
2819 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2820 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2821 		    error);
2822 		goto ixl_rebuild_hw_structs_after_reset_err;
2823 	}
2824 
2825 	i40e_clear_pxe_mode(hw);
2826 
2827 	error = ixl_get_hw_capabilities(pf);
2828 	if (error) {
2829 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2830 		goto ixl_rebuild_hw_structs_after_reset_err;
2831 	}
2832 
2833 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2834 	    hw->func_caps.num_rx_qp, 0, 0);
2835 	if (error) {
2836 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2837 		goto ixl_rebuild_hw_structs_after_reset_err;
2838 	}
2839 
2840 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2841 	if (error) {
2842 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2843 		goto ixl_rebuild_hw_structs_after_reset_err;
2844 	}
2845 
2846 	/* reserve a contiguous allocation for the PF's VSI */
2847 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2848 	if (error) {
2849 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2850 		    error);
2851 		/* TODO: error handling */
2852 	}
2853 
2854 	error = ixl_switch_config(pf);
2855 	if (error) {
2856 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2857 		     error);
2858 		error = EIO;
2859 		goto ixl_rebuild_hw_structs_after_reset_err;
2860 	}
2861 
2862 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2863 	    NULL);
2864 	if (error) {
2865 		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2866 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2867 		error = EIO;
2868 		goto ixl_rebuild_hw_structs_after_reset_err;
2869 	}
2870 
2871 	u8 set_fc_err_mask;
2872 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2873 	if (error) {
2874 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2875 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2876 		error = EIO;
2877 		goto ixl_rebuild_hw_structs_after_reset_err;
2878 	}
2879 
2880 	/* Remove default filters reinstalled by FW on reset */
2881 	ixl_del_default_hw_filters(vsi);
2882 
2883 	/* Determine link state */
2884 	if (ixl_attach_get_link_status(pf)) {
2885 		error = EINVAL;
2886 		/* TODO: error handling */
2887 	}
2888 
2889 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2890 	ixl_get_fw_lldp_status(pf);
2891 
2892 	/* Keep admin queue interrupts active while driver is loaded */
2893 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2894 		ixl_configure_intr0_msix(pf);
2895 		ixl_enable_intr0(hw);
2896 	}
2897 
2898 	device_printf(dev, "Rebuilding driver state done.\n");
2899 	return (0);
2900 
2901 ixl_rebuild_hw_structs_after_reset_err:
2902 	device_printf(dev, "Reload the driver to recover\n");
2903 	return (error);
2904 }
2905 
2906 void
2907 ixl_handle_empr_reset(struct ixl_pf *pf)
2908 {
2909 	struct ixl_vsi	*vsi = &pf->vsi;
2910 	struct i40e_hw	*hw = &pf->hw;
2911 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2912 	int count = 0;
2913 	u32 reg;
2914 
2915 	ixl_prepare_for_reset(pf, is_up);
2916 
2917 	/* Typically finishes within 3-4 seconds; poll up to 10s (100 x 100ms) */
2918 	while (count++ < 100) {
2919 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2920 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2921 		if (reg)
2922 			i40e_msec_delay(100);
2923 		else
2924 			break;
2925 	}
2926 	ixl_dbg(pf, IXL_DBG_INFO,
2927 			"Reset wait count: %d\n", count);
2928 
2929 	ixl_rebuild_hw_structs_after_reset(pf);
2930 
2931 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2932 }
2933 
2934 /**
2935  * Update VSI-specific ethernet statistics counters.
2936  **/
2937 void
2938 ixl_update_eth_stats(struct ixl_vsi *vsi)
2939 {
2940 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2941 	struct i40e_hw *hw = &pf->hw;
2942 	struct i40e_eth_stats *es;
2943 	struct i40e_eth_stats *oes;
2944 	struct i40e_hw_port_stats *nsd;
2945 	u16 stat_idx = vsi->info.stat_counter_idx;
2946 
2947 	es = &vsi->eth_stats;
2948 	oes = &vsi->eth_stats_offsets;
2949 	nsd = &pf->stats;
2950 
2951 	/* Gather up the stats that the hw collects */
2952 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2953 			   vsi->stat_offsets_loaded,
2954 			   &oes->tx_errors, &es->tx_errors);
2955 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2956 			   vsi->stat_offsets_loaded,
2957 			   &oes->rx_discards, &es->rx_discards);
2958 
2959 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2960 			   I40E_GLV_GORCL(stat_idx),
2961 			   vsi->stat_offsets_loaded,
2962 			   &oes->rx_bytes, &es->rx_bytes);
2963 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2964 			   I40E_GLV_UPRCL(stat_idx),
2965 			   vsi->stat_offsets_loaded,
2966 			   &oes->rx_unicast, &es->rx_unicast);
2967 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2968 			   I40E_GLV_MPRCL(stat_idx),
2969 			   vsi->stat_offsets_loaded,
2970 			   &oes->rx_multicast, &es->rx_multicast);
2971 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2972 			   I40E_GLV_BPRCL(stat_idx),
2973 			   vsi->stat_offsets_loaded,
2974 			   &oes->rx_broadcast, &es->rx_broadcast);
2975 
2976 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2977 			   I40E_GLV_GOTCL(stat_idx),
2978 			   vsi->stat_offsets_loaded,
2979 			   &oes->tx_bytes, &es->tx_bytes);
2980 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2981 			   I40E_GLV_UPTCL(stat_idx),
2982 			   vsi->stat_offsets_loaded,
2983 			   &oes->tx_unicast, &es->tx_unicast);
2984 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2985 			   I40E_GLV_MPTCL(stat_idx),
2986 			   vsi->stat_offsets_loaded,
2987 			   &oes->tx_multicast, &es->tx_multicast);
2988 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2989 			   I40E_GLV_BPTCL(stat_idx),
2990 			   vsi->stat_offsets_loaded,
2991 			   &oes->tx_broadcast, &es->tx_broadcast);
2992 	vsi->stat_offsets_loaded = true;
2993 }
2994 
2995 void
2996 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2997 {
2998 	struct ixl_pf		*pf;
2999 	struct ifnet		*ifp;
3000 	struct i40e_eth_stats	*es;
3001 	u64			tx_discards;
3002 
3003 	struct i40e_hw_port_stats *nsd;
3004 
3005 	pf = vsi->back;
3006 	ifp = vsi->ifp;
3007 	es = &vsi->eth_stats;
3008 	nsd = &pf->stats;
3009 
3010 	ixl_update_eth_stats(vsi);
3011 
3012 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3013 
3014 	/* Update ifnet stats */
3015 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3016 	                   es->rx_multicast +
3017 			   es->rx_broadcast);
3018 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3019 	                   es->tx_multicast +
3020 			   es->tx_broadcast);
3021 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3022 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3023 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3024 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3025 
3026 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3027 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3028 	    nsd->rx_jabber);
3029 	IXL_SET_OERRORS(vsi, es->tx_errors);
3030 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3031 	IXL_SET_OQDROPS(vsi, tx_discards);
3032 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3033 	IXL_SET_COLLISIONS(vsi, 0);
3034 }
3035 
3036 /**
3037  * Reset all of the stats for the given pf
3038  **/
3039 void
3040 ixl_pf_reset_stats(struct ixl_pf *pf)
3041 {
3042 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3043 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3044 	pf->stat_offsets_loaded = false;
3045 }
3046 
3047 /**
3048  * Resets all stats of the given vsi
3049  **/
3050 void
3051 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3052 {
3053 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3054 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3055 	vsi->stat_offsets_loaded = false;
3056 }
3057 
3058 /**
3059  * Read and update a 48 bit stat from the hw
3060  *
3061  * Since the device stats are not reset on a PF reset, they likely will not
3062  * be zeroed when the driver starts.  We'll save the first values read
3063  * and use them as offsets to be subtracted from the raw values in order
3064  * to report stats that count from zero.
3065  **/
3066 void
3067 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3068 	bool offset_loaded, u64 *offset, u64 *stat)
3069 {
3070 	u64 new_data;
3071 
3072 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3073 	new_data = rd64(hw, loreg);
3074 #else
3075 	/*
3076 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3077 	 * 10 don't support 64-bit bus reads/writes.
3078 	 */
3079 	new_data = rd32(hw, loreg);
3080 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3081 #endif
3082 
3083 	if (!offset_loaded)
3084 		*offset = new_data;
3085 	if (new_data >= *offset)
3086 		*stat = new_data - *offset;
3087 	else
3088 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3089 	*stat &= 0xFFFFFFFFFFFFULL;
3090 }
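/*
 * Worked example: if the first read latches *offset = 0x1000, a later
 * read of 0x1800 reports 0x800. If the 48-bit counter wraps so that
 * new_data < *offset, 2^48 is added before subtracting, keeping the
 * reported difference correct.
 */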
3091 
3092 /**
3093  * Read and update a 32 bit stat from the hw
3094  **/
3095 void
3096 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3097 	bool offset_loaded, u64 *offset, u64 *stat)
3098 {
3099 	u32 new_data;
3100 
3101 	new_data = rd32(hw, reg);
3102 	if (!offset_loaded)
3103 		*offset = new_data;
3104 	if (new_data >= *offset)
3105 		*stat = (u32)(new_data - *offset);
3106 	else
3107 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3108 }
3109 
3110 void
3111 ixl_add_device_sysctls(struct ixl_pf *pf)
3112 {
3113 	device_t dev = pf->dev;
3114 	struct i40e_hw *hw = &pf->hw;
3115 
3116 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3117 	struct sysctl_oid_list *ctx_list =
3118 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3119 
3120 	struct sysctl_oid *debug_node;
3121 	struct sysctl_oid_list *debug_list;
3122 
3123 	struct sysctl_oid *fec_node;
3124 	struct sysctl_oid_list *fec_list;
3125 
3126 	/* Set up sysctls */
3127 	SYSCTL_ADD_PROC(ctx, ctx_list,
3128 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3129 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3130 
3131 	SYSCTL_ADD_PROC(ctx, ctx_list,
3132 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3133 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3134 
3135 	SYSCTL_ADD_PROC(ctx, ctx_list,
3136 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3137 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3138 
3139 	SYSCTL_ADD_PROC(ctx, ctx_list,
3140 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3141 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3142 
3143 	SYSCTL_ADD_PROC(ctx, ctx_list,
3144 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3145 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3146 
3147 	SYSCTL_ADD_PROC(ctx, ctx_list,
3148 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3149 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3150 	    "Queues not allocated to a PF or VF");
3151 
3152 	SYSCTL_ADD_PROC(ctx, ctx_list,
3153 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3154 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3155 	    "Immediately set TX ITR value for all queues");
3156 
3157 	SYSCTL_ADD_PROC(ctx, ctx_list,
3158 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3159 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3160 	    "Immediately set RX ITR value for all queues");
3161 
3162 	SYSCTL_ADD_INT(ctx, ctx_list,
3163 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3164 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3165 
3166 	SYSCTL_ADD_INT(ctx, ctx_list,
3167 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3168 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3169 
3170 	/* Add FEC sysctls for 25G adapters */
3171 	if (i40e_is_25G_device(hw->device_id)) {
3172 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3173 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3174 		fec_list = SYSCTL_CHILDREN(fec_node);
3175 
3176 		SYSCTL_ADD_PROC(ctx, fec_list,
3177 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3178 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3179 
3180 		SYSCTL_ADD_PROC(ctx, fec_list,
3181 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3182 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3183 
3184 		SYSCTL_ADD_PROC(ctx, fec_list,
3185 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3186 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3187 
3188 		SYSCTL_ADD_PROC(ctx, fec_list,
3189 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3190 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3191 
3192 		SYSCTL_ADD_PROC(ctx, fec_list,
3193 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3194 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3195 	}
3196 
3197 	SYSCTL_ADD_PROC(ctx, ctx_list,
3198 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3199 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3200 
3201 	/* Add sysctls meant to print debug information, but don't list them
3202 	 * in "sysctl -a" output. */
3203 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3204 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3205 	debug_list = SYSCTL_CHILDREN(debug_node);
3206 
3207 	SYSCTL_ADD_UINT(ctx, debug_list,
3208 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3209 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3210 
3211 	SYSCTL_ADD_UINT(ctx, debug_list,
3212 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3213 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3214 
3215 	SYSCTL_ADD_PROC(ctx, debug_list,
3216 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3217 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3218 
3219 	SYSCTL_ADD_PROC(ctx, debug_list,
3220 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3221 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3222 
3223 	SYSCTL_ADD_PROC(ctx, debug_list,
3224 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3225 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3226 
3227 	SYSCTL_ADD_PROC(ctx, debug_list,
3228 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3229 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3230 
3231 	SYSCTL_ADD_PROC(ctx, debug_list,
3232 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3233 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3234 
3235 	SYSCTL_ADD_PROC(ctx, debug_list,
3236 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3237 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3238 
3239 	SYSCTL_ADD_PROC(ctx, debug_list,
3240 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3241 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3242 
3243 	SYSCTL_ADD_PROC(ctx, debug_list,
3244 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3245 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3246 
3247 	SYSCTL_ADD_PROC(ctx, debug_list,
3248 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3249 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3250 
3251 	SYSCTL_ADD_PROC(ctx, debug_list,
3252 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3253 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3254 
3255 	SYSCTL_ADD_PROC(ctx, debug_list,
3256 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3258 
3259 	SYSCTL_ADD_PROC(ctx, debug_list,
3260 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3261 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3262 
3263 	SYSCTL_ADD_PROC(ctx, debug_list,
3264 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3265 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3266 
3267 	SYSCTL_ADD_PROC(ctx, debug_list,
3268 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3269 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3270 	    "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3271 
3272 	SYSCTL_ADD_PROC(ctx, debug_list,
3273 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3274 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3275 
3276 	if (pf->has_i2c) {
3277 		SYSCTL_ADD_PROC(ctx, debug_list,
3278 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3279 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3280 
3281 		SYSCTL_ADD_PROC(ctx, debug_list,
3282 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3283 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3284 
3285 		SYSCTL_ADD_PROC(ctx, debug_list,
3286 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3287 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3288 	}
3289 }
3290 
3291 /*
3292  * Primarily for finding out how many queues can be assigned to VFs
3293  * at runtime.
3294  */
3295 static int
3296 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3297 {
3298 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3299 	int queues;
3300 
3301 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3302 
3303 	return sysctl_handle_int(oidp, NULL, queues, req);
3304 }
3305 
3306 /*
3307 ** Set flow control using sysctl:
3308 ** 	0 - off
3309 **	1 - rx pause
3310 **	2 - tx pause
3311 **	3 - full
3312 */
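/*
 * Example usage from userland (interface unit 0 assumed):
 *
 *     sysctl dev.ixl.0.fc=3	# request full flow control
 *     sysctl dev.ixl.0.fc	# read back the current setting
 */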
3313 int
3314 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3315 {
3316 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3317 	struct i40e_hw *hw = &pf->hw;
3318 	device_t dev = pf->dev;
3319 	int requested_fc, error = 0;
3320 	enum i40e_status_code aq_error = 0;
3321 	u8 fc_aq_err = 0;
3322 
3323 	/* Get request */
3324 	requested_fc = pf->fc;
3325 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3326 	if ((error) || (req->newptr == NULL))
3327 		return (error);
3328 	if (requested_fc < 0 || requested_fc > 3) {
3329 		device_printf(dev,
3330 		    "Invalid fc mode; valid modes are 0 through 3\n");
3331 		return (EINVAL);
3332 	}
3333 
3334 	/* Set fc ability for port */
3335 	hw->fc.requested_mode = requested_fc;
3336 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3337 	if (aq_error) {
3338 		device_printf(dev,
3339 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3340 		    __func__, aq_error, fc_aq_err);
3341 		return (EIO);
3342 	}
3343 	pf->fc = requested_fc;
3344 
3345 	return (0);
3346 }
3347 
3348 char *
3349 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3350 {
3351 	int index;
3352 
3353 	char *speeds[] = {
3354 		"Unknown",
3355 		"100 Mbps",
3356 		"1 Gbps",
3357 		"10 Gbps",
3358 		"40 Gbps",
3359 		"20 Gbps",
3360 		"25 Gbps",
3361 	};
3362 
3363 	switch (link_speed) {
3364 	case I40E_LINK_SPEED_100MB:
3365 		index = 1;
3366 		break;
3367 	case I40E_LINK_SPEED_1GB:
3368 		index = 2;
3369 		break;
3370 	case I40E_LINK_SPEED_10GB:
3371 		index = 3;
3372 		break;
3373 	case I40E_LINK_SPEED_40GB:
3374 		index = 4;
3375 		break;
3376 	case I40E_LINK_SPEED_20GB:
3377 		index = 5;
3378 		break;
3379 	case I40E_LINK_SPEED_25GB:
3380 		index = 6;
3381 		break;
3382 	case I40E_LINK_SPEED_UNKNOWN:
3383 	default:
3384 		index = 0;
3385 		break;
3386 	}
3387 
3388 	return speeds[index];
3389 }
3390 
3391 int
3392 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3393 {
3394 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3395 	struct i40e_hw *hw = &pf->hw;
3396 	int error = 0;
3397 
3398 	ixl_update_link_status(pf);
3399 
3400 	char *speed = ixl_aq_speed_to_str(hw->phy.link_info.link_speed);
3401 	/* Pass the real length so "100 Mbps" (8 chars) is not clipped */
3402 	error = sysctl_handle_string(oidp, speed, strlen(speed) + 1, req);
3403 	return (error);
3404 }
3405 
3406 /*
3407  * Converts an 8-bit speed bitmap between the sysctl flag format
3408  * and the Admin Queue flag format.
3409  */
3410 static u8
3411 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3412 {
3413 	static u16 speedmap[6] = {
3414 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3415 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3416 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3417 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3418 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3419 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3420 	};
3421 	u8 retval = 0;
3422 
3423 	for (int i = 0; i < 6; i++) {
3424 		if (to_aq)
3425 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3426 		else
3427 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3428 	}
3429 
3430 	return (retval);
3431 }
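
/*
 * Worked example for the table above: sysctl flags 0x6 (1G | 10G) map
 * to (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB) when to_aq is true,
 * and the same AQ value maps back to 0x6 when to_aq is false.
 */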
3432 
3433 int
3434 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3435 {
3436 	struct i40e_hw *hw = &pf->hw;
3437 	device_t dev = pf->dev;
3438 	struct i40e_aq_get_phy_abilities_resp abilities;
3439 	struct i40e_aq_set_phy_config config;
3440 	enum i40e_status_code aq_error = 0;
3441 
3442 	/* Get current capability information */
3443 	aq_error = i40e_aq_get_phy_capabilities(hw,
3444 	    FALSE, FALSE, &abilities, NULL);
3445 	if (aq_error) {
3446 		device_printf(dev,
3447 		    "%s: Error getting phy capabilities %d,"
3448 		    " aq error: %d\n", __func__, aq_error,
3449 		    hw->aq.asq_last_status);
3450 		return (EIO);
3451 	}
3452 
3453 	/* Prepare new config */
3454 	bzero(&config, sizeof(config));
3455 	if (from_aq)
3456 		config.link_speed = speeds;
3457 	else
3458 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3459 	config.phy_type = abilities.phy_type;
3460 	config.phy_type_ext = abilities.phy_type_ext;
3461 	config.abilities = abilities.abilities
3462 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3463 	config.eee_capability = abilities.eee_capability;
3464 	config.eeer = abilities.eeer_val;
3465 	config.low_power_ctrl = abilities.d3_lpan;
3466 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3467 
3468 	/* Do aq command & restart link */
3469 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3470 	if (aq_error) {
3471 		device_printf(dev,
3472 		    "%s: Error setting new phy config %d,"
3473 		    " aq error: %d\n", __func__, aq_error,
3474 		    hw->aq.asq_last_status);
3475 		return (EIO);
3476 	}
3477 
3478 	return (0);
3479 }
3480 
3481 /*
3482 ** Supported link speeds:
3483 **	Flags:
3484 **	 0x1 - 100 Mb
3485 **	 0x2 - 1G
3486 **	 0x4 - 10G
3487 **	 0x8 - 20G
3488 **	0x10 - 25G
3489 **	0x20 - 40G
3490 */
3491 static int
3492 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3493 {
3494 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3495 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3496 
3497 	return sysctl_handle_int(oidp, NULL, supported, req);
3498 }
3499 
3500 /*
3501 ** Control link advertise speed:
3502 **	Flags:
3503 **	 0x1 - advertise 100 Mb
3504 **	 0x2 - advertise 1G
3505 **	 0x4 - advertise 10G
3506 **	 0x8 - advertise 20G
3507 **	0x10 - advertise 25G
3508 **	0x20 - advertise 40G
3509 **
3510 **	Set to 0 to disable link
3511 */
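/*
** Example (illustrative; the sysctl path is an assumption):
**	# sysctl dev.ixl.0.advertise_speed=0x24	# advertise only 10G and 40G
*/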
3512 int
3513 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3514 {
3515 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3516 	device_t dev = pf->dev;
3517 	u8 converted_speeds;
3518 	int requested_ls = 0;
3519 	int error = 0;
3520 
3521 	/* Read in new mode */
3522 	requested_ls = pf->advertised_speed;
3523 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3524 	if ((error) || (req->newptr == NULL))
3525 		return (error);
3526 
3527 	/* Error out if bits outside of possible flag range are set */
3528 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3529 		device_printf(dev, "Input advertised speed out of range; "
3530 		    "valid flags are: 0x%02x\n",
3531 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3532 		return (EINVAL);
3533 	}
3534 
3535 	/* Check if adapter supports input value */
3536 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3537 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3538 		device_printf(dev, "Invalid advertised speed; "
3539 		    "valid flags are: 0x%02x\n",
3540 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3541 		return (EINVAL);
3542 	}
3543 
3544 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3545 	if (error)
3546 		return (error);
3547 
3548 	pf->advertised_speed = requested_ls;
3549 	ixl_update_link_status(pf);
3550 	return (0);
3551 }
3552 
3553 /*
3554 ** Get the width and transaction speed of
3555 ** the bus this adapter is plugged into.
3556 */
3557 void
3558 ixl_get_bus_info(struct ixl_pf *pf)
3559 {
3560 	struct i40e_hw *hw = &pf->hw;
3561 	device_t dev = pf->dev;
3562 	u16 link;
3563 	u32 offset, num_ports;
3564 	u64 max_speed;
3565 
3566 	/* Some devices don't use PCIE */
3567 	if (hw->mac.type == I40E_MAC_X722)
3568 		return;
3569 
3570 	/* Read PCI Express Capabilities Link Status Register */
3571 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3572 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3573 
3574 	/* Fill out hw struct with PCIE info */
3575 	i40e_set_pci_config_data(hw, link);
3576 
3577 	/* Use info to print out bandwidth messages */
3578 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3579 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
3580 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
3581 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
3582 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3583 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3584 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3585 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3586 	    ("Unknown"));
3587 
3588 	/*
3589 	 * If adapter is in slot with maximum supported speed,
3590 	 * no warning message needs to be printed out.
3591 	 */
3592 	if (hw->bus.speed >= i40e_bus_speed_8000
3593 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3594 		return;
3595 
3596 	num_ports = bitcount32(hw->func_caps.valid_functions);
3597 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3598 
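	/*
	 * Rough heuristic (units are loose; figures illustrative): four
	 * 10 Gb/s ports need ~40000 Mb/s, while a 5.0GT/s x4 link provides
	 * about 5000 * 4 = 20000, so the warning below would fire.
	 */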
3599 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3600 		device_printf(dev, "PCI-Express bandwidth available"
3601 		    " for this device may be insufficient for"
3602 		    " optimal performance.\n");
3603 		device_printf(dev, "Please move the device to a different"
3604 		    " PCI-e link with more lanes and/or higher"
3605 		    " transfer rate.\n");
3606 	}
3607 }
3608 
3609 static int
3610 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3611 {
3612 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3613 	struct i40e_hw	*hw = &pf->hw;
3614 	struct sbuf	*sbuf;
3615 
3616 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3617 	ixl_nvm_version_str(hw, sbuf);
3618 	sbuf_finish(sbuf);
3619 	sbuf_delete(sbuf);
3620 
3621 	return (0);
3622 }
3623 
3624 void
3625 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3626 {
3627 	if ((nvma->command == I40E_NVM_READ) &&
3628 	    ((nvma->config & 0xFF) == 0xF) &&
3629 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3630 	    (nvma->offset == 0) &&
3631 	    (nvma->data_size == 1)) {
3632 		/* "Get Driver Status" read; intentionally not logged */
3633 		// device_printf(dev, "- Get Driver Status Command\n");
3634 	} else if (nvma->command == I40E_NVM_READ) {
3635 		/* Other reads are not logged either */
3636 	} else {
3637 		/* Log the details of anything that isn't a read */
3638 		switch (nvma->command) {
3639 		case 0xB:
3640 			device_printf(dev, "- command: I40E_NVM_READ\n");
3641 			break;
3642 		case 0xC:
3643 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3644 			break;
3645 		default:
3646 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3647 			break;
3648 		}
3649 
3650 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3651 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3652 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3653 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3654 	}
3655 }
3656 
3657 int
3658 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3659 {
3660 	struct i40e_hw *hw = &pf->hw;
3661 	struct i40e_nvm_access *nvma;
3662 	device_t dev = pf->dev;
3663 	enum i40e_status_code status = 0;
3664 	size_t nvma_size, ifd_len, exp_len;
3665 	int err, perrno;
3666 
3667 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3668 
3669 	/* Sanity checks */
3670 	nvma_size = sizeof(struct i40e_nvm_access);
3671 	ifd_len = ifd->ifd_len;
3672 
3673 	if (ifd_len < nvma_size ||
3674 	    ifd->ifd_data == NULL) {
3675 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3676 		    __func__);
3677 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3678 		    __func__, ifd_len, nvma_size);
3679 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3680 		    ifd->ifd_data);
3681 		return (EINVAL);
3682 	}
3683 
3684 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3685 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3686 	if (err) {
3687 		device_printf(dev, "%s: Cannot get request from user space\n",
3688 		    __func__);
3689 		free(nvma, M_DEVBUF);
3690 		return (err);
3691 	}
3692 
3693 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3694 		ixl_print_nvm_cmd(dev, nvma);
3695 
3696 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3697 		int count = 0;
3698 		while (count++ < 100) {
3699 			i40e_msec_delay(100);
3700 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3701 				break;
3702 		}
3703 	}
3704 
3705 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3706 		free(nvma, M_DEVBUF);
3707 		return (-EBUSY);
3708 	}
3709 
3710 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3711 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3712 		    __func__);
3713 		free(nvma, M_DEVBUF);
3714 		return (EINVAL);
3715 	}
3716 
3717 	/*
3718 	 * Older versions of the NVM update tool don't set ifd_len to the size
3719 	 * of the entire buffer passed to the ioctl. Check the data_size field
3720 	 * in the contained i40e_nvm_access struct and ensure everything is
3721 	 * copied in from userspace.
3722 	 */
3723 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
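	/* e.g. data_size = 8 implies exp_len = sizeof(struct i40e_nvm_access) + 7 */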
3724 
3725 	if (ifd_len < exp_len) {
3726 		ifd_len = exp_len;
3727 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3728 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3729 		if (err) {
3730 			device_printf(dev, "%s: Cannot get request from user space\n",
3731 					__func__);
3732 			free(nvma, M_DEVBUF);
3733 			return (err);
3734 		}
3735 	}
3736 
3737 	// TODO: Might need a different lock here
3738 	// IXL_PF_LOCK(pf);
3739 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3740 	// IXL_PF_UNLOCK(pf);
3741 
3742 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3743 	free(nvma, M_DEVBUF);
3744 	if (err) {
3745 		device_printf(dev, "%s: Cannot return data to user space\n",
3746 				__func__);
3747 		return (err);
3748 	}
3749 
3750 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3751 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3752 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3753 		    i40e_stat_str(hw, status), perrno);
3754 
3755 	/*
3756 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
3757 	 * to restart this ioctl. Return -EACCES in place of -EPERM instead.
3758 	 */
3759 	if (perrno == -EPERM)
3760 		return (-EACCES);
3761 	else
3762 		return (perrno);
3763 }
3764 
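/*
 * Scan the four MDIO/I2C select registers for one that has I2C enabled
 * and is mapped to this PF's port; returns that interface index, or -1
 * if none matches.
 */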
3765 int
3766 ixl_find_i2c_interface(struct ixl_pf *pf)
3767 {
3768 	struct i40e_hw *hw = &pf->hw;
3769 	bool i2c_en, port_matched;
3770 	u32 reg;
3771 
3772 	for (int i = 0; i < 4; i++) {
3773 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3774 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3775 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3776 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3777 		    & BIT(hw->port);
3778 		if (i2c_en && port_matched)
3779 			return (i);
3780 	}
3781 
3782 	return (-1);
3783 }
3784 
3785 static char *
3786 ixl_phy_type_string(u32 bit_pos, bool ext)
3787 {
3788 	static char * phy_types_str[32] = {
3789 		"SGMII",
3790 		"1000BASE-KX",
3791 		"10GBASE-KX4",
3792 		"10GBASE-KR",
3793 		"40GBASE-KR4",
3794 		"XAUI",
3795 		"XFI",
3796 		"SFI",
3797 		"XLAUI",
3798 		"XLPPI",
3799 		"40GBASE-CR4",
3800 		"10GBASE-CR1",
3801 		"SFP+ Active DA",
3802 		"QSFP+ Active DA",
3803 		"Reserved (14)",
3804 		"Reserved (15)",
3805 		"Reserved (16)",
3806 		"100BASE-TX",
3807 		"1000BASE-T",
3808 		"10GBASE-T",
3809 		"10GBASE-SR",
3810 		"10GBASE-LR",
3811 		"10GBASE-SFP+Cu",
3812 		"10GBASE-CR1",
3813 		"40GBASE-CR4",
3814 		"40GBASE-SR4",
3815 		"40GBASE-LR4",
3816 		"1000BASE-SX",
3817 		"1000BASE-LX",
3818 		"1000BASE-T Optical",
3819 		"20GBASE-KR2",
3820 		"Reserved (31)"
3821 	};
3822 	static char * ext_phy_types_str[8] = {
3823 		"25GBASE-KR",
3824 		"25GBASE-CR",
3825 		"25GBASE-SR",
3826 		"25GBASE-LR",
3827 		"25GBASE-AOC",
3828 		"25GBASE-ACC",
3829 		"Reserved (6)",
3830 		"Reserved (7)"
3831 	};
3832 
3833 	if (ext && bit_pos > 7) return "Invalid_Ext";
3834 	if (bit_pos > 31) return "Invalid";
3835 
3836 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3837 }
3838 
3839 /* TODO: ERJ: I don't think this is necessary anymore. */
3840 int
3841 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3842 {
3843 	device_t dev = pf->dev;
3844 	struct i40e_hw *hw = &pf->hw;
3845 	struct i40e_aq_desc desc;
3846 	enum i40e_status_code status;
3847 
3848 	struct i40e_aqc_get_link_status *aq_link_status =
3849 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3850 
3851 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3852 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3853 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3854 	if (status) {
3855 		device_printf(dev,
3856 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3857 		    __func__, i40e_stat_str(hw, status),
3858 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3859 		return (EIO);
3860 	}
3861 
3862 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3863 	return (0);
3864 }
3865 
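/*
 * Link-status PHY type values at or above 0x1F index the extended
 * (25G) table in ixl_phy_type_string().
 */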
3866 static char *
3867 ixl_phy_type_string_ls(u8 val)
3868 {
3869 	if (val >= 0x1F)
3870 		return ixl_phy_type_string(val - 0x1F, true);
3871 	else
3872 		return ixl_phy_type_string(val, false);
3873 }
3874 
3875 static int
3876 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3877 {
3878 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3879 	device_t dev = pf->dev;
3880 	struct sbuf *buf;
3881 	int error = 0;
3882 
3883 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3884 	if (!buf) {
3885 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3886 		return (ENOMEM);
3887 	}
3888 
3889 	struct i40e_aqc_get_link_status link_status;
3890 	error = ixl_aq_get_link_status(pf, &link_status);
3891 	if (error) {
3892 		sbuf_delete(buf);
3893 		return (error);
3894 	}
3895 
3896 	sbuf_printf(buf, "\n"
3897 	    "PHY Type : 0x%02x<%s>\n"
3898 	    "Speed    : 0x%02x\n"
3899 	    "Link info: 0x%02x\n"
3900 	    "AN info  : 0x%02x\n"
3901 	    "Ext info : 0x%02x\n"
3902 	    "Loopback : 0x%02x\n"
3903 	    "Max Frame: %d\n"
3904 	    "Config   : 0x%02x\n"
3905 	    "Power    : 0x%02x",
3906 	    link_status.phy_type,
3907 	    ixl_phy_type_string_ls(link_status.phy_type),
3908 	    link_status.link_speed,
3909 	    link_status.link_info,
3910 	    link_status.an_info,
3911 	    link_status.ext_info,
3912 	    link_status.loopback,
3913 	    link_status.max_frame_size,
3914 	    link_status.config,
3915 	    link_status.power_desc);
3916 
3917 	error = sbuf_finish(buf);
3918 	if (error)
3919 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3920 
3921 	sbuf_delete(buf);
3922 	return (error);
3923 }
3924 
3925 static int
3926 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3927 {
3928 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3929 	struct i40e_hw *hw = &pf->hw;
3930 	device_t dev = pf->dev;
3931 	enum i40e_status_code status;
3932 	struct i40e_aq_get_phy_abilities_resp abilities;
3933 	struct sbuf *buf;
3934 	int error = 0;
3935 
3936 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3937 	if (!buf) {
3938 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3939 		return (ENOMEM);
3940 	}
3941 
3942 	status = i40e_aq_get_phy_capabilities(hw,
3943 	    FALSE, FALSE, &abilities, NULL);
3944 	if (status) {
3945 		device_printf(dev,
3946 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3947 		    __func__, i40e_stat_str(hw, status),
3948 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3949 		sbuf_delete(buf);
3950 		return (EIO);
3951 	}
3952 
3953 	sbuf_printf(buf, "\n"
3954 	    "PHY Type : %08x",
3955 	    abilities.phy_type);
3956 
3957 	if (abilities.phy_type != 0) {
3958 		sbuf_printf(buf, "<");
3959 		for (int i = 0; i < 32; i++)
3960 			if ((1 << i) & abilities.phy_type)
3961 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3962 		sbuf_printf(buf, ">\n");
3963 	}
3964 
3965 	sbuf_printf(buf, "PHY Ext  : %02x",
3966 	    abilities.phy_type_ext);
3967 
3968 	if (abilities.phy_type_ext != 0) {
3969 		sbuf_printf(buf, "<");
3970 		for (int i = 0; i < 4; i++)
3971 			if ((1 << i) & abilities.phy_type_ext)
3972 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3973 		sbuf_printf(buf, ">");
3974 	}
3975 	sbuf_printf(buf, "\n");
3976 
3977 	sbuf_printf(buf,
3978 	    "Speed    : %02x\n"
3979 	    "Abilities: %02x\n"
3980 	    "EEE cap  : %04x\n"
3981 	    "EEER reg : %08x\n"
3982 	    "D3 Lpan  : %02x\n"
3983 	    "ID       : %02x %02x %02x %02x\n"
3984 	    "ModType  : %02x %02x %02x\n"
3985 	    "ModType E: %01x\n"
3986 	    "FEC Cfg  : %02x\n"
3987 	    "Ext CC   : %02x",
3988 	    abilities.link_speed,
3989 	    abilities.abilities, abilities.eee_capability,
3990 	    abilities.eeer_val, abilities.d3_lpan,
3991 	    abilities.phy_id[0], abilities.phy_id[1],
3992 	    abilities.phy_id[2], abilities.phy_id[3],
3993 	    abilities.module_type[0], abilities.module_type[1],
3994 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3995 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3996 	    abilities.ext_comp_code);
3997 
3998 	error = sbuf_finish(buf);
3999 	if (error)
4000 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4001 
4002 	sbuf_delete(buf);
4003 	return (error);
4004 }
4005 
4006 static int
4007 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4008 {
4009 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4010 	struct ixl_vsi *vsi = &pf->vsi;
4011 	struct ixl_mac_filter *f;
4012 	device_t dev = pf->dev;
4013 	int error = 0, ftl_len = 0, ftl_counter = 0;
4014 
4015 	struct sbuf *buf;
4016 
4017 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4018 	if (!buf) {
4019 		device_printf(dev, "Could not allocate sbuf for output.\n");
4020 		return (ENOMEM);
4021 	}
4022 
4023 	sbuf_printf(buf, "\n");
4024 
4025 	/* Print MAC filters */
4026 	sbuf_printf(buf, "PF Filters:\n");
4027 	SLIST_FOREACH(f, &vsi->ftl, next)
4028 		ftl_len++;
4029 
4030 	if (ftl_len < 1)
4031 		sbuf_printf(buf, "(none)\n");
4032 	else {
4033 		SLIST_FOREACH(f, &vsi->ftl, next) {
4034 			sbuf_printf(buf,
4035 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4036 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4037 			/* don't print '\n' for last entry */
4038 			if (++ftl_counter != ftl_len)
4039 				sbuf_printf(buf, "\n");
4040 		}
4041 	}
4042 
4043 #ifdef PCI_IOV
4044 	/* TODO: Give each VF its own filter list sysctl */
4045 	struct ixl_vf *vf;
4046 	if (pf->num_vfs > 0) {
4047 		sbuf_printf(buf, "\n\n");
4048 		for (int i = 0; i < pf->num_vfs; i++) {
4049 			vf = &pf->vfs[i];
4050 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4051 				continue;
4052 
4053 			vsi = &vf->vsi;
4054 			ftl_len = 0, ftl_counter = 0;
4055 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4056 			SLIST_FOREACH(f, &vsi->ftl, next)
4057 				ftl_len++;
4058 
4059 			if (ftl_len < 1)
4060 				sbuf_printf(buf, "(none)\n");
4061 			else {
4062 				SLIST_FOREACH(f, &vsi->ftl, next) {
4063 					sbuf_printf(buf,
4064 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4065 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4066 				}
4067 			}
4068 		}
4069 	}
4070 #endif
4071 
4072 	error = sbuf_finish(buf);
4073 	if (error)
4074 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4075 	sbuf_delete(buf);
4076 
4077 	return (error);
4078 }
4079 
4080 #define IXL_SW_RES_SIZE 0x14
4081 int
4082 ixl_res_alloc_cmp(const void *a, const void *b)
4083 {
4084 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4085 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4086 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4087 
4088 	return ((int)one->resource_type - (int)two->resource_type);
4089 }
4090 
4091 /*
4092  * Longest string length: 25
4093  */
4094 char *
4095 ixl_switch_res_type_string(u8 type)
4096 {
4097 	// TODO: Make this const; that requires a const char * return type
4098 	static char * ixl_switch_res_type_strings[0x14] = {
4099 		"VEB",
4100 		"VSI",
4101 		"Perfect Match MAC address",
4102 		"S-tag",
4103 		"(Reserved)",
4104 		"Multicast hash entry",
4105 		"Unicast hash entry",
4106 		"VLAN",
4107 		"VSI List entry",
4108 		"(Reserved)",
4109 		"VLAN Statistic Pool",
4110 		"Mirror Rule",
4111 		"Queue Set",
4112 		"Inner VLAN Forward filter",
4113 		"(Reserved)",
4114 		"Inner MAC",
4115 		"IP",
4116 		"GRE/VN1 Key",
4117 		"VN2 Key",
4118 		"Tunneling Port"
4119 	};
4120 
4121 	if (type < 0x14)
4122 		return ixl_switch_res_type_strings[type];
4123 	else
4124 		return "(Reserved)";
4125 }
4126 
4127 static int
4128 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4129 {
4130 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4131 	struct i40e_hw *hw = &pf->hw;
4132 	device_t dev = pf->dev;
4133 	struct sbuf *buf;
4134 	enum i40e_status_code status;
4135 	int error = 0;
4136 
4137 	u8 num_entries;
4138 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4139 
4140 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4141 	if (!buf) {
4142 		device_printf(dev, "Could not allocate sbuf for output.\n");
4143 		return (ENOMEM);
4144 	}
4145 
4146 	bzero(resp, sizeof(resp));
4147 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4148 				resp,
4149 				IXL_SW_RES_SIZE,
4150 				NULL);
4151 	if (status) {
4152 		device_printf(dev,
4153 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4154 		    __func__, i40e_stat_str(hw, status),
4155 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4156 		sbuf_delete(buf);
4157 		return (EIO);
4158 	}
4159 
4160 	/* Sort entries by type for display */
4161 	qsort(resp, num_entries,
4162 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4163 	    &ixl_res_alloc_cmp);
4164 
4165 	sbuf_cat(buf, "\n");
4166 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4167 	sbuf_printf(buf,
4168 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4169 	    "                          | (this)     | (all) | (this) | (all)       \n");
4170 	for (int i = 0; i < num_entries; i++) {
4171 		sbuf_printf(buf,
4172 		    "%25s | %10d   %5d   %6d   %12d",
4173 		    ixl_switch_res_type_string(resp[i].resource_type),
4174 		    resp[i].guaranteed,
4175 		    resp[i].total,
4176 		    resp[i].used,
4177 		    resp[i].total_unalloced);
4178 		if (i < num_entries - 1)
4179 			sbuf_cat(buf, "\n");
4180 	}
4181 
4182 	error = sbuf_finish(buf);
4183 	if (error)
4184 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4185 
4186 	sbuf_delete(buf);
4187 	return (error);
4188 }
4189 
4190 /*
4191 ** Caller must init and delete sbuf; this function will clear and
4192 ** finish it for caller.
4193 */
4194 char *
4195 ixl_switch_element_string(struct sbuf *s,
4196     struct i40e_aqc_switch_config_element_resp *element)
4197 {
4198 	sbuf_clear(s);
4199 
4200 	switch (element->element_type) {
4201 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4202 		sbuf_printf(s, "MAC %3d", element->element_info);
4203 		break;
4204 	case I40E_AQ_SW_ELEM_TYPE_PF:
4205 		sbuf_printf(s, "PF  %3d", element->element_info);
4206 		break;
4207 	case I40E_AQ_SW_ELEM_TYPE_VF:
4208 		sbuf_printf(s, "VF  %3d", element->element_info);
4209 		break;
4210 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4211 		sbuf_cat(s, "EMP");
4212 		break;
4213 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4214 		sbuf_cat(s, "BMC");
4215 		break;
4216 	case I40E_AQ_SW_ELEM_TYPE_PV:
4217 		sbuf_cat(s, "PV");
4218 		break;
4219 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4220 		sbuf_cat(s, "VEB");
4221 		break;
4222 	case I40E_AQ_SW_ELEM_TYPE_PA:
4223 		sbuf_cat(s, "PA");
4224 		break;
4225 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4226 		sbuf_printf(s, "VSI %3d", element->element_info);
4227 		break;
4228 	default:
4229 		sbuf_cat(s, "?");
4230 		break;
4231 	}
4232 
4233 	sbuf_finish(s);
4234 	return sbuf_data(s);
4235 }
4236 
4237 static int
4238 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4239 {
4240 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4241 	struct i40e_hw *hw = &pf->hw;
4242 	device_t dev = pf->dev;
4243 	struct sbuf *buf;
4244 	struct sbuf *nmbuf;
4245 	enum i40e_status_code status;
4246 	int error = 0;
4247 	u16 next = 0;
4248 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4249 
4250 	struct i40e_aqc_get_switch_config_resp *sw_config;
4251 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4252 
4253 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4254 	if (!buf) {
4255 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4256 		return (ENOMEM);
4257 	}
4258 
4259 	status = i40e_aq_get_switch_config(hw, sw_config,
4260 	    sizeof(aq_buf), &next, NULL);
4261 	if (status) {
4262 		device_printf(dev,
4263 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4264 		    __func__, i40e_stat_str(hw, status),
4265 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4266 		sbuf_delete(buf);
4267 		return (EIO);
4268 	}
4269 	if (next)
4270 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4271 		    __func__, next);
4272 
4273 	nmbuf = sbuf_new_auto();
4274 	if (!nmbuf) {
4275 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4276 		sbuf_delete(buf);
4277 		return (ENOMEM);
4278 	}
4279 
4280 	sbuf_cat(buf, "\n");
4281 	/* Assuming <= 255 elements in switch */
4282 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4283 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4284 	/* Exclude:
4285 	** Revision -- all elements are revision 1 for now
4286 	*/
4287 	sbuf_printf(buf,
4288 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4289 	    "                |          |          | (uplink)\n");
4290 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4291 		// "%4d (%8s) | %8s   %8s   %#8x",
4292 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4293 		sbuf_cat(buf, " ");
4294 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4295 		    &sw_config->element[i]));
4296 		sbuf_cat(buf, " | ");
4297 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4298 		sbuf_cat(buf, "   ");
4299 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4300 		sbuf_cat(buf, "   ");
4301 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4302 		if (i < sw_config->header.num_reported - 1)
4303 			sbuf_cat(buf, "\n");
4304 	}
4305 	sbuf_delete(nmbuf);
4306 
4307 	error = sbuf_finish(buf);
4308 	if (error)
4309 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4310 
4311 	sbuf_delete(buf);
4312 
4313 	return (error);
4314 }
4315 
4316 static int
4317 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4318 {
4319 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4320 	struct i40e_hw *hw = &pf->hw;
4321 	device_t dev = pf->dev;
4322 	struct sbuf *buf;
4323 	int error = 0;
4324 	enum i40e_status_code status;
4325 	u32 reg;
4326 
4327 	struct i40e_aqc_get_set_rss_key_data key_data;
4328 
4329 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4330 	if (!buf) {
4331 		device_printf(dev, "Could not allocate sbuf for output.\n");
4332 		return (ENOMEM);
4333 	}
4334 
4335 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4336 
4337 	sbuf_cat(buf, "\n");
4338 	if (hw->mac.type == I40E_MAC_X722) {
4339 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4340 		if (status)
4341 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4342 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4343 	} else {
4344 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4345 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4346 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4347 		}
4348 	}
4349 
4350 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4351 
4352 	error = sbuf_finish(buf);
4353 	if (error)
4354 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4355 	sbuf_delete(buf);
4356 
4357 	return (error);
4358 }
4359 
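/*
 * Hex-dump 'length' bytes from 'buf' into 'sb', 16 bytes per line,
 * each line prefixed with its offset (biased by 'label_offset');
 * when 'text' is true, the printable ASCII for each byte is appended
 * after the hex column ('.' for non-printable bytes).
 */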
4360 static void
4361 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4362 {
4363 	int i, j, k, width;
4364 	char c;
4365 
4366 	if (length < 1 || buf == NULL) return;
4367 
4368 	int byte_stride = 16;
4369 	int lines = length / byte_stride;
4370 	int rem = length % byte_stride;
4371 	if (rem > 0)
4372 		lines++;
4373 
4374 	for (i = 0; i < lines; i++) {
4375 		width = (rem > 0 && i == lines - 1)
4376 		    ? rem : byte_stride;
4377 
4378 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4379 
4380 		for (j = 0; j < width; j++)
4381 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4382 
4383 		if (width < byte_stride) {
4384 			for (k = 0; k < (byte_stride - width); k++)
4385 				sbuf_printf(sb, "   ");
4386 		}
4387 
4388 		if (!text) {
4389 			sbuf_printf(sb, "\n");
4390 			continue;
4391 		}
4392 
4393 		for (j = 0; j < width; j++) {
4394 			c = (char)buf[i * byte_stride + j];
4395 			if (c < 32 || c > 126)
4396 				sbuf_printf(sb, ".");
4397 			else
4398 				sbuf_printf(sb, "%c", c);
4399 
4400 			if (j == width - 1)
4401 				sbuf_printf(sb, "\n");
4402 		}
4403 	}
4404 }
4405 
4406 static int
4407 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4408 {
4409 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4410 	struct i40e_hw *hw = &pf->hw;
4411 	device_t dev = pf->dev;
4412 	struct sbuf *buf;
4413 	int error = 0;
4414 	enum i40e_status_code status;
4415 	u8 hlut[512];
4416 	u32 reg;
4417 
4418 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4419 	if (!buf) {
4420 		device_printf(dev, "Could not allocate sbuf for output.\n");
4421 		return (ENOMEM);
4422 	}
4423 
4424 	bzero(hlut, sizeof(hlut));
4425 	sbuf_cat(buf, "\n");
4426 	if (hw->mac.type == I40E_MAC_X722) {
4427 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4428 		if (status)
4429 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4430 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4431 	} else {
4432 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4433 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4434 			bcopy(&reg, &hlut[i << 2], 4);
4435 		}
4436 	}
4437 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4438 
4439 	error = sbuf_finish(buf);
4440 	if (error)
4441 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4442 	sbuf_delete(buf);
4443 
4444 	return (error);
4445 }
4446 
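/*
 * Report the RSS Hash Enable (HENA) bitmask, read as one 64-bit value
 * split across the two 32-bit I40E_PFQF_HENA registers; each set bit
 * enables RSS hashing for one packet type.
 */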
4447 static int
4448 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4449 {
4450 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4451 	struct i40e_hw *hw = &pf->hw;
4452 	u64 hena;
4453 
4454 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4455 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4456 
4457 	return sysctl_handle_long(oidp, NULL, hena, req);
4458 }
4459 
4460 /*
4461  * Sysctl to disable firmware's link management
4462  *
4463  * 1 - Disable link management on this port
4464  * 0 - Re-enable link management
4465  *
4466  * On normal NVMs, firmware manages link by default.
4467  */
4468 static int
4469 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4470 {
4471 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4472 	struct i40e_hw *hw = &pf->hw;
4473 	device_t dev = pf->dev;
4474 	int requested_mode = -1;
4475 	enum i40e_status_code status = 0;
4476 	int error = 0;
4477 
4478 	/* Read in new mode */
4479 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4480 	if ((error) || (req->newptr == NULL))
4481 		return (error);
4482 	/* Check for sane value */
4483 	if (requested_mode < 0 || requested_mode > 1) {
4484 		device_printf(dev, "Valid modes are 0 or 1\n");
4485 		return (EINVAL);
4486 	}
4487 
4488 	/* Set new mode */
4489 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4490 	if (status) {
4491 		device_printf(dev,
4492 		    "%s: Error setting new phy debug mode %s,"
4493 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4494 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4495 		return (EIO);
4496 	}
4497 
4498 	return (0);
4499 }
4500 
4501 /*
4502  * Read some diagnostic data from an SFP module
4503  * Bytes 96-99, 102-105 from device address 0xA2
4504  */
4505 static int
4506 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4507 {
4508 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4509 	device_t dev = pf->dev;
4510 	struct sbuf *sbuf;
4511 	int error = 0;
4512 	u8 output;
4513 
4514 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4515 	if (error) {
4516 		device_printf(dev, "Error reading from i2c\n");
4517 		return (error);
4518 	}
4519 	if (output != 0x3) {
4520 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4521 		return (EIO);
4522 	}
4523 
4524 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4525 	if (!(output & 0x60)) {
4526 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4527 		return (EIO);
4528 	}
4529 
4530 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4531 
4532 	for (u8 offset = 96; offset < 100; offset++) {
4533 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4534 		sbuf_printf(sbuf, "%02X ", output);
4535 	}
4536 	for (u8 offset = 102; offset < 106; offset++) {
4537 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4538 		sbuf_printf(sbuf, "%02X ", output);
4539 	}
4540 
4541 	sbuf_finish(sbuf);
4542 	sbuf_delete(sbuf);
4543 
4544 	return (0);
4545 }
4546 
4547 /*
4548  * Sysctl to read a byte from I2C bus.
4549  *
4550  * Input: 32-bit value:
4551  * 	bits 0-7:   device address (0xA0 or 0xA2)
4552  * 	bits 8-15:  offset (0-255)
4553  *	bits 16-31: unused
4554  * Output: 8-bit value read
4555  */
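/*
 * Example (illustrative; the sysctl path is an assumption): read offset
 * 96 from device 0xA2 with input (96 << 8) | 0xA2 = 0x60A2:
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 */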
4556 static int
4557 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4558 {
4559 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4560 	device_t dev = pf->dev;
4561 	int input = -1, error = 0;
4562 	u8 dev_addr, offset, output;
4563 
4564 	/* Read in I2C read parameters */
4565 	error = sysctl_handle_int(oidp, &input, 0, req);
4566 	if ((error) || (req->newptr == NULL))
4567 		return (error);
4568 	/* Validate device address */
4569 	dev_addr = input & 0xFF;
4570 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4571 		return (EINVAL);
4572 	}
4573 	offset = (input >> 8) & 0xFF;
4574 
4575 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4576 	if (error)
4577 		return (error);
4578 
4579 	device_printf(dev, "%02X\n", output);
4580 	return (0);
4581 }
4582 
4583 /*
4584  * Sysctl to write a byte to the I2C bus.
4585  *
4586  * Input: 32-bit value:
4587  * 	bits 0-7:   device address (0xA0 or 0xA2)
4588  * 	bits 8-15:  offset (0-255)
4589  *	bits 16-23: value to write
4590  *	bits 24-31: unused
4591  * Output: 8-bit value written
4592  */
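/*
 * Example (illustrative; the sysctl path is an assumption): write 0x55
 * to offset 0x7F on device 0xA2 with input 0x557FA2:
 *	# sysctl dev.ixl.0.debug.write_i2c_byte=0x557FA2
 */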
4593 static int
4594 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4595 {
4596 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4597 	device_t dev = pf->dev;
4598 	int input = -1, error = 0;
4599 	u8 dev_addr, offset, value;
4600 
4601 	/* Read in I2C write parameters */
4602 	error = sysctl_handle_int(oidp, &input, 0, req);
4603 	if ((error) || (req->newptr == NULL))
4604 		return (error);
4605 	/* Validate device address */
4606 	dev_addr = input & 0xFF;
4607 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4608 		return (EINVAL);
4609 	}
4610 	offset = (input >> 8) & 0xFF;
4611 	value = (input >> 16) & 0xFF;
4612 
4613 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4614 	if (error)
4615 		return (error);
4616 
4617 	device_printf(dev, "%02X written\n", value);
4618 	return (0);
4619 }
4620 
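/*
 * Helpers for the FEC sysctls below: read the current module/FEC info
 * from the PHY abilities and report whether 'bit_pos' is set, or write
 * a new PHY config with 'bit_pos' set or cleared while leaving the
 * other FEC bits untouched.
 */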
4621 static int
4622 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4623     u8 bit_pos, int *is_set)
4624 {
4625 	device_t dev = pf->dev;
4626 	struct i40e_hw *hw = &pf->hw;
4627 	enum i40e_status_code status;
4628 
4629 	status = i40e_aq_get_phy_capabilities(hw,
4630 	    FALSE, FALSE, abilities, NULL);
4631 	if (status) {
4632 		device_printf(dev,
4633 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4634 		    __func__, i40e_stat_str(hw, status),
4635 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4636 		return (EIO);
4637 	}
4638 
4639 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4640 	return (0);
4641 }
4642 
4643 static int
4644 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4645     u8 bit_pos, int set)
4646 {
4647 	device_t dev = pf->dev;
4648 	struct i40e_hw *hw = &pf->hw;
4649 	struct i40e_aq_set_phy_config config;
4650 	enum i40e_status_code status;
4651 
4652 	/* Set new PHY config */
4653 	memset(&config, 0, sizeof(config));
4654 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4655 	if (set)
4656 		config.fec_config |= bit_pos;
4657 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4658 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4659 		config.phy_type = abilities->phy_type;
4660 		config.phy_type_ext = abilities->phy_type_ext;
4661 		config.link_speed = abilities->link_speed;
4662 		config.eee_capability = abilities->eee_capability;
4663 		config.eeer = abilities->eeer_val;
4664 		config.low_power_ctrl = abilities->d3_lpan;
4665 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4666 
4667 		if (status) {
4668 			device_printf(dev,
4669 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4670 			    __func__, i40e_stat_str(hw, status),
4671 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4672 			return (EIO);
4673 		}
4674 	}
4675 
4676 	return (0);
4677 }
4678 
4679 static int
4680 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4681 {
4682 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4683 	int mode, error = 0;
4684 
4685 	struct i40e_aq_get_phy_abilities_resp abilities;
4686 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4687 	if (error)
4688 		return (error);
4689 	/* Read in new mode */
4690 	error = sysctl_handle_int(oidp, &mode, 0, req);
4691 	if ((error) || (req->newptr == NULL))
4692 		return (error);
4693 
4694 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4695 }
4696 
4697 static int
4698 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4699 {
4700 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4701 	int mode, error = 0;
4702 
4703 	struct i40e_aq_get_phy_abilities_resp abilities;
4704 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4705 	if (error)
4706 		return (error);
4707 	/* Read in new mode */
4708 	error = sysctl_handle_int(oidp, &mode, 0, req);
4709 	if ((error) || (req->newptr == NULL))
4710 		return (error);
4711 
4712 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4713 }
4714 
4715 static int
4716 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4717 {
4718 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4719 	int mode, error = 0;
4720 
4721 	struct i40e_aq_get_phy_abilities_resp abilities;
4722 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4723 	if (error)
4724 		return (error);
4725 	/* Read in new mode */
4726 	error = sysctl_handle_int(oidp, &mode, 0, req);
4727 	if ((error) || (req->newptr == NULL))
4728 		return (error);
4729 
4730 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4731 }
4732 
4733 static int
4734 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4735 {
4736 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4737 	int mode, error = 0;
4738 
4739 	struct i40e_aq_get_phy_abilities_resp abilities;
4740 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4741 	if (error)
4742 		return (error);
4743 	/* Read in new mode */
4744 	error = sysctl_handle_int(oidp, &mode, 0, req);
4745 	if ((error) || (req->newptr == NULL))
4746 		return (error);
4747 
4748 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4749 }
4750 
4751 static int
4752 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4753 {
4754 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4755 	int mode, error = 0;
4756 
4757 	struct i40e_aq_get_phy_abilities_resp abilities;
4758 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4759 	if (error)
4760 		return (error);
4761 	/* Read in new mode */
4762 	error = sysctl_handle_int(oidp, &mode, 0, req);
4763 	if ((error) || (req->newptr == NULL))
4764 		return (error);
4765 
4766 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4767 }
4768 
4769 static int
4770 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4771 {
4772 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4773 	struct i40e_hw *hw = &pf->hw;
4774 	device_t dev = pf->dev;
4775 	struct sbuf *buf;
4776 	int error = 0;
4777 	enum i40e_status_code status;
4778 
4779 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4780 	if (!buf) {
4781 		device_printf(dev, "Could not allocate sbuf for output.\n");
4782 		return (ENOMEM);
4783 	}
4784 
4785 	u8 *final_buff;
4786 	/* This amount is only necessary if reading the entire cluster into memory */
4787 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	/* M_NOWAIT so a failed allocation returns NULL instead of sleeping */
4788 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
4789 	if (final_buff == NULL) {
4790 		device_printf(dev, "Could not allocate memory for output.\n");
4791 		goto out;
4792 	}
4793 	int final_buff_len = 0;
4794 
4795 	u8 cluster_id = 1;
4796 	bool more = true;
4797 
4798 	u8 dump_buf[4096];
4799 	u16 curr_buff_size = 4096;
4800 	u8 curr_next_table = 0;
4801 	u32 curr_next_index = 0;
4802 
4803 	u16 ret_buff_size;
4804 	u8 ret_next_table;
4805 	u32 ret_next_index;
4806 
4807 	sbuf_cat(buf, "\n");
4808 
4809 	while (more) {
4810 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4811 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4812 		if (status) {
4813 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4814 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4815 			goto free_out;
4816 		}
4817 
4818 		/* copy info out of temp buffer */
4819 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4820 		final_buff_len += ret_buff_size;
4821 
4822 		if (ret_next_table != curr_next_table) {
4823 			/* We're done with the current table; we can dump out read data. */
4824 			sbuf_printf(buf, "%d:", curr_next_table);
4825 			int bytes_printed = 0;
4826 			/* "<" fixes an off-by-one that dumped one 16-byte line too many */
4827 			while (bytes_printed < final_buff_len) {
4828 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4829 				bytes_printed += 16;
4830 			}
4831 			sbuf_cat(buf, "\n");
4831 
4832 			/* The entire cluster has been read; we're finished */
4833 			if (ret_next_table == 0xFF)
4834 				break;
4835 
4836 			/* Otherwise clear the output buffer and continue reading */
4837 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4838 			final_buff_len = 0;
4839 		}
4840 
4841 		if (ret_next_index == 0xFFFFFFFF)
4842 			ret_next_index = 0;
4843 
4844 		bzero(dump_buf, sizeof(dump_buf));
4845 		curr_next_table = ret_next_table;
4846 		curr_next_index = ret_next_index;
4847 	}
4848 
4849 free_out:
4850 	free(final_buff, M_DEVBUF);
4851 out:
4852 	error = sbuf_finish(buf);
4853 	if (error)
4854 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4855 	sbuf_delete(buf);
4856 
4857 	return (error);
4858 }
4859 
4860 static int
4861 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4862 {
4863 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4864 	struct i40e_hw *hw = &pf->hw;
4865 	device_t dev = pf->dev;
4866 	int error = 0;
4867 	int state, new_state;
4868 	enum i40e_status_code status;
4869 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4870 
4871 	/* Read in new mode */
4872 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4873 	if ((error) || (req->newptr == NULL))
4874 		return (error);
4875 
4876 	/* Already in requested state */
4877 	if (new_state == state)
4878 		return (error);
4879 
4880 	if (new_state == 0) {
4881 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4882 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4883 			return (EINVAL);
4884 		}
4885 
4886 		if (pf->hw.aq.api_maj_ver < 1 ||
4887 		    (pf->hw.aq.api_maj_ver == 1 &&
4888 		    pf->hw.aq.api_min_ver < 7)) {
4889 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4890 			return (EINVAL);
4891 		}
4892 
4893 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4894 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4895 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4896 	} else {
4897 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4898 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4899 			device_printf(dev, "FW LLDP agent is already running\n");
4900 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4901 	}
4902 
4903 	return (0);
4904 }
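
/*
 * Example (illustrative; the sysctl path is an assumption):
 *	# sysctl dev.ixl.0.fw_lldp=0	# stop the FW LLDP agent
 */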
4905 
4906 /*
4907  * Get FW LLDP Agent status
4908  */
4909 int
4910 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4911 {
4912 	enum i40e_status_code ret = I40E_SUCCESS;
4913 	struct i40e_lldp_variables lldp_cfg;
4914 	struct i40e_hw *hw = &pf->hw;
4915 	u8 adminstatus = 0;
4916 
4917 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4918 	if (ret)
4919 		return ret;
4920 
4921 	/* Get the LLDP AdminStatus for the current port */
4922 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4923 	adminstatus &= 0xf;
4924 
4925 	/* Check if LLDP agent is disabled */
4926 	if (!adminstatus) {
4927 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4928 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4929 	} else
4930 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4931 
4932 	return (0);
4933 }
4934 
4935 int
4936 ixl_attach_get_link_status(struct ixl_pf *pf)
4937 {
4938 	struct i40e_hw *hw = &pf->hw;
4939 	device_t dev = pf->dev;
4940 	int error = 0;
4941 
4942 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4943 	    (hw->aq.fw_maj_ver < 4)) {
4944 		i40e_msec_delay(75);
4945 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4946 		if (error) {
4947 			device_printf(dev, "link restart failed, aq_err=%d\n",
4948 			    pf->hw.aq.asq_last_status);
4949 			return error;
4950 		}
4951 	}
4952 
4953 	/* Determine link state */
4954 	hw->phy.get_link_info = TRUE;
4955 	i40e_get_link_status(hw, &pf->link_up);
4956 	return (0);
4957 }
4958 
4959 static int
4960 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4961 {
4962 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4963 	int requested = 0, error = 0;
4964 
4965 	/* Read in new mode */
4966 	error = sysctl_handle_int(oidp, &requested, 0, req);
4967 	if ((error) || (req->newptr == NULL))
4968 		return (error);
4969 
4970 	/* Initiate the PF reset later in the admin task */
4971 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4972 
4973 	return (error);
4974 }
4975 
4976 static int
4977 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4978 {
4979 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4980 	struct i40e_hw *hw = &pf->hw;
4981 	int requested = 0, error = 0;
4982 
4983 	/* Read in new mode */
4984 	error = sysctl_handle_int(oidp, &requested, 0, req);
4985 	if ((error) || (req->newptr == NULL))
4986 		return (error);
4987 
4988 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4989 
4990 	return (error);
4991 }
4992 
4993 static int
4994 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4995 {
4996 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4997 	struct i40e_hw *hw = &pf->hw;
4998 	int requested = 0, error = 0;
4999 
5000 	/* Read in new mode */
5001 	error = sysctl_handle_int(oidp, &requested, 0, req);
5002 	if ((error) || (req->newptr == NULL))
5003 		return (error);
5004 
5005 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5006 
5007 	return (error);
5008 }
5009 
5010 static int
5011 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5012 {
5013 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5014 	struct i40e_hw *hw = &pf->hw;
5015 	int requested = 0, error = 0;
5016 
5017 	/* Read in new mode */
5018 	error = sysctl_handle_int(oidp, &requested, 0, req);
5019 	if ((error) || (req->newptr == NULL))
5020 		return (error);
5021 
5022 	/* TODO: Find out how to bypass this */
5023 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5024 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5025 		error = EINVAL;
5026 	} else
5027 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5028 
5029 	return (error);
5030 }
5031 
5032 /*
5033  * Print out mapping of TX queue indexes and Rx queue indexes
5034  * to MSI-X vectors.
5035  */
5036 static int
5037 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5038 {
5039 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5040 	struct ixl_vsi *vsi = &pf->vsi;
5041 	device_t dev = pf->dev;
5042 	struct sbuf *buf;
5043 	int error = 0;
5044 
5045 	struct ixl_rx_queue *rx_que;
5046 	struct ixl_tx_queue *tx_que;
5047 
5048 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5049 	if (!buf) {
5050 		device_printf(dev, "Could not allocate sbuf for output.\n");
5051 		return (ENOMEM);
5052 	}
5053 
5054 	sbuf_cat(buf, "\n");
5055 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5056 		rx_que = &vsi->rx_queues[i];
5057 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5058 	}
5059 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5060 		tx_que = &vsi->tx_queues[i];
5061 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5062 	}
5063 
5064 	error = sbuf_finish(buf);
5065 	if (error)
5066 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5067 	sbuf_delete(buf);
5068 
5069 	return (error);
5070 }
5071