/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

/* Sysctls */
static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

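/*
 * Write the PF's configured Tx ITR value into each Tx queue's ITR
 * register and cache it in the ring state.
 */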
static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

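/*
 * Write the PF's configured Rx ITR value into each Rx queue's ITR
 * register and cache it in the ring state.
 */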
static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
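	/*
	 * Per the cases below: method 0 picks the best available interface
	 * for this device/FW combination, 1 uses bit-banging, 2 uses direct
	 * register access, and 3 uses Admin Queue commands.
	 */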
	switch (pf->i2c_access_method) {
	case 0: {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == 1 &&
		    hw->aq.api_min_ver >= 7) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case 3:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

#if 0
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		     error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

	/* Re-enable admin queue interrupt */
	if (pf->msix > 1) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

err_out:
	return (error);
#endif
	ixl_rebuild_hw_structs_after_reset(pf);

	/* The PF reset should have cleared any critical errors */
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= IXL_ICR0_CRIT_ERR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

err_out:
	return (error);
}

/*
 * TODO: Make sure this properly handles admin queue / single rx queue intr
 */
int
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;

	// pf->admin_irq++
	++que->irqs;

// TODO: Check against proper field
#if 0
	/* Clear PBA at start of ISR if using legacy interrupts */
	if (pf->msix == 0)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
#endif

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	// TODO: Is intr0 enabled somewhere else?
	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);
	// ixl_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 *
	 * TODO: I think at least ECC error requires a GLOBR, not PFR
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	// TODO: Linux driver never re-enables this interrupt once it has been detected
	// Then what is supposed to happen? A PF reset? Should it never happen?
	// TODO: Parse out this error into something human readable
	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}

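/*
 * Log a link-up message with the negotiated speed, requested and
 * negotiated FEC modes, autonegotiation status, and flow control mode.
 */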
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	// TODO: See if max is really necessary
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

// TODO: Fix
#if 0
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;
	/* XXX: Assuming only 1 queue in single interrupt mode */
#endif
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

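/* Release IRQ and PCI memory resources acquired during attach. */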
void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);
}

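/*
 * Add an ifmedia entry for each media type reported in the given
 * PHY type bitmap.
 */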
void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		/* TODO: Maybe just retry this in a task... */
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 * Returns the baud rate of the fastest speed set in the bitmap.
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	if (link_speeds & I40E_LINK_SPEED_100MB)
		return IF_Mbps(100);
	else
		/* Minimum supported link speed */
		return IF_Mbps(100);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *			 like the number of descriptors and buffer size,
 *			 plus we initialize the rings through this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context  */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context  */
		if (scctx->isc_max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

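/* Free all MAC filters queued on the VSI's filter list. */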
void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
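			/* i.e. new = (10 * target * old) / (9 * target + old) */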
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}

/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
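			/* i.e. new = (10 * target * old) / (9 * target + old) */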
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

#ifdef IXL_DEBUG
/**
 * ixl_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_tx_queue *tx_que;
	int error;
	u32 val;

	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
	if (!tx_que)
		return (0);

	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixl_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_rx_queue *rx_que;
	int error;
	u32 val;

	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
	if (!rx_que)
		return (0);

	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
#endif

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

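/* Attach driver statistics plus VSI, queue, and MAC stats sysctls. */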
void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &pf->admin_irq,
			"Admin Queue IRQs received");

	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");

	ixl_add_queues_sysctls(dev, vsi);

	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL, "MAC Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}
}
1702 
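/*
 * Program the RSS hash seed: via an AQ command on X722 devices, or by
 * writing the PFQF_HKEY registers directly on other devices.
 */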
1703 void
1704 ixl_set_rss_key(struct ixl_pf *pf)
1705 {
1706 	struct i40e_hw *hw = &pf->hw;
1707 	struct ixl_vsi *vsi = &pf->vsi;
1708 	device_t	dev = pf->dev;
1709 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1710 	enum i40e_status_code status;
1711 
1712 #ifdef RSS
1713 	/* Fetch the configured RSS key */
1714 	rss_getkey((uint8_t *)&rss_seed);
1715 #else
1716 	ixl_get_default_rss_key(rss_seed);
1717 #endif
1718 	/* Fill out hash function seed */
1719 	if (hw->mac.type == I40E_MAC_X722) {
1720 		struct i40e_aqc_get_set_rss_key_data key_data;
1721 		bcopy(rss_seed, &key_data, sizeof(key_data));
1722 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1723 		if (status)
1724 			device_printf(dev,
1725 			    "i40e_aq_set_rss_key status %s, error %s\n",
1726 			    i40e_stat_str(hw, status),
1727 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1728 	} else {
1729 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1730 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1731 	}
1732 }
1733 
1734 /*
1735  * Configure enabled PCTYPES for RSS.
1736  */
1737 void
1738 ixl_set_rss_pctypes(struct ixl_pf *pf)
1739 {
1740 	struct i40e_hw *hw = &pf->hw;
1741 	u64		set_hena = 0, hena;
1742 
1743 #ifdef RSS
1744 	u32		rss_hash_config;
1745 
1746 	rss_hash_config = rss_gethashconfig();
1747 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1748 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1749 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1750 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1751 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1752 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1753 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1754 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1755 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1756 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1757 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1758 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1759 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1760 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1761 #else
1762 	if (hw->mac.type == I40E_MAC_X722)
1763 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1764 	else
1765 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1766 #endif
1767 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1768 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1769 	hena |= set_hena;
1770 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1771 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1772 
1773 }
1774 
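/*
 * Fill the hardware RSS lookup table, spreading its entries across the
 * VSI's RX queues in round-robin order.
 */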
1775 void
1776 ixl_set_rss_hlut(struct ixl_pf *pf)
1777 {
1778 	struct i40e_hw	*hw = &pf->hw;
1779 	struct ixl_vsi *vsi = &pf->vsi;
1780 	device_t	dev = iflib_get_dev(vsi->ctx);
1781 	int		i, que_id;
1782 	int		lut_entry_width;
1783 	u32		lut = 0;
1784 	enum i40e_status_code status;
1785 
1786 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1787 
1788 	/* Populate the LUT with the RX queues, assigned in round-robin fashion */
1789 	u8 hlut_buf[512];
1790 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1791 #ifdef RSS
1792 		/*
1793 		 * Fetch the RSS bucket id for the given indirection entry.
1794 		 * Cap it at the number of configured buckets (which is
1795 		 * num_queues.)
1796 		 */
1797 		que_id = rss_get_indirection_to_bucket(i);
1798 		que_id = que_id % vsi->num_rx_queues;
1799 #else
1800 		que_id = i % vsi->num_rx_queues;
1801 #endif
1802 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1803 		hlut_buf[i] = lut;
1804 	}
1805 
1806 	if (hw->mac.type == I40E_MAC_X722) {
1807 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1808 		if (status)
1809 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1810 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1811 	} else {
1812 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1813 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1814 		ixl_flush(hw);
1815 	}
1816 }
1817 
1818 /*
1819 ** Setup the PF's RSS parameters.
1820 */
1821 void
1822 ixl_config_rss(struct ixl_pf *pf)
1823 {
1824 	ixl_set_rss_key(pf);
1825 	ixl_set_rss_pctypes(pf);
1826 	ixl_set_rss_hlut(pf);
1827 }
1828 
1829 /*
1830 ** This routine updates VLAN filters; called by init,
1831 ** it scans the filter table and then updates the HW
1832 ** after a soft reset.
1833 */
1834 void
1835 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1836 {
1837 	struct ixl_mac_filter	*f;
1838 	int			cnt = 0, flags;
1839 
1840 	if (vsi->num_vlans == 0)
1841 		return;
1842 	/*
1843 	** Scan the filter list for VLAN entries,
1844 	** mark them for addition, and then issue
1845 	** the AQ update.
1846 	*/
1847 	SLIST_FOREACH(f, &vsi->ftl, next) {
1848 		if (f->flags & IXL_FILTER_VLAN) {
1849 			f->flags |=
1850 			    (IXL_FILTER_ADD |
1851 			    IXL_FILTER_USED);
1852 			cnt++;
1853 		}
1854 	}
1855 	if (cnt == 0) {
1856 		device_printf(vsi->dev, "%s: no VLAN filters found!\n", __func__);
1857 		return;
1858 	}
1859 	flags = IXL_FILTER_VLAN;
1860 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1861 	ixl_add_hw_filters(vsi, flags, cnt);
1862 }
1863 
1864 /*
1865  * In some firmware versions there is default MAC/VLAN filter
1866  * configured which interferes with filters managed by driver.
1867  * Make sure it's removed.
1868  */
1869 void
1870 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1871 {
1872 	struct i40e_aqc_remove_macvlan_element_data e;
1873 
1874 	bzero(&e, sizeof(e));
1875 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1876 	e.vlan_tag = 0;
1877 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1878 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1879 
1880 	bzero(&e, sizeof(e));
1881 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1882 	e.vlan_tag = 0;
1883 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1884 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1885 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1886 }
1887 
1888 /*
1889 ** Initialize filter list and add filters that the hardware
1890 ** needs to know about.
1891 **
1892 ** Requires VSI's filter list & seid to be set before calling.
1893 */
1894 void
1895 ixl_init_filters(struct ixl_vsi *vsi)
1896 {
1897 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1898 
1899 	/* Initialize mac filter list for VSI */
1900 	SLIST_INIT(&vsi->ftl);
1901 
1902 	/* Receive broadcast Ethernet frames */
1903 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1904 
1905 	ixl_del_default_hw_filters(vsi);
1906 
1907 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1908 	/*
1909 	 * Prevent Tx flow control frames from being sent out by
1910 	 * non-firmware transmitters.
1911 	 * This affects every VSI in the PF.
1912 	 */
1913 	if (pf->enable_tx_fc_filter)
1914 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1915 }
1916 
1917 /*
1918 ** This routine adds multicast filters
1919 */
1920 void
1921 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1922 {
1923 	struct ixl_mac_filter *f;
1924 
1925 	/* Does one already exist? */
1926 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1927 	if (f != NULL)
1928 		return;
1929 
1930 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1931 	if (f != NULL)
1932 		f->flags |= IXL_FILTER_MC;
1933 	else
1934 		device_printf(vsi->dev, "WARNING: no filter available!\n");
1935 }
1936 
1937 void
1938 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1939 {
1940 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1941 }
1942 
1943 /*
1944  * This routine adds a MAC/VLAN filter to the software filter
1945  * list, then adds that new filter to the HW if it doesn't already
1946  * exist in the SW filter list.
1947  */
1948 void
1949 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1950 {
1951 	struct ixl_mac_filter	*f, *tmp;
1952 	struct ixl_pf		*pf;
1953 	device_t		dev;
1954 
1955 	DEBUGOUT("ixl_add_filter: begin");
1956 
1957 	pf = vsi->back;
1958 	dev = pf->dev;
1959 
1960 	/* Does one already exist? */
1961 	f = ixl_find_filter(vsi, macaddr, vlan);
1962 	if (f != NULL)
1963 		return;
1964 	/*
1965 	** If this is the first vlan being registered, we need
1966 	** to remove the ANY filter that indicates we are not
1967 	** in a vlan, and replace it with a 0 filter.
1968 	*/
1969 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1970 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1971 		if (tmp != NULL) {
1972 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1973 			ixl_add_filter(vsi, macaddr, 0);
1974 		}
1975 	}
1976 
1977 	f = ixl_new_filter(vsi, macaddr, vlan);
1978 	if (f == NULL) {
1979 		device_printf(dev, "WARNING: no filter available!!\n");
1980 		return;
1981 	}
1982 	if (f->vlan != IXL_VLAN_ANY)
1983 		f->flags |= IXL_FILTER_VLAN;
1984 	else
1985 		vsi->num_macs++;
1986 
1987 	f->flags |= IXL_FILTER_USED;
1988 	ixl_add_hw_filters(vsi, f->flags, 1);
1989 }
1990 
1991 void
1992 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1993 {
1994 	struct ixl_mac_filter *f;
1995 
1996 	f = ixl_find_filter(vsi, macaddr, vlan);
1997 	if (f == NULL)
1998 		return;
1999 
 	/*
 	 * ixl_del_hw_filters() removes and frees entries flagged
 	 * IXL_FILTER_DEL, so cache what the accounting below needs
 	 * before making the call; the decrement mirrors the increment
 	 * done for IXL_VLAN_ANY filters in ixl_add_filter().
 	 */
 	bool vlan_any = (f->vlan == IXL_VLAN_ANY);
2000 	f->flags |= IXL_FILTER_DEL;
2001 	ixl_del_hw_filters(vsi, 1);
2002 	if (vlan_any)
2003 		vsi->num_macs--;
2004 
2005 	/* Check if this is the last vlan removal */
2006 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2007 		/* Switch back to a non-vlan filter */
2008 		ixl_del_filter(vsi, macaddr, 0);
2009 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2010 	}
2011 	return;
2012 }
2013 
2014 /*
2015 ** Find the filter with both matching mac addr and vlan id
2016 */
2017 struct ixl_mac_filter *
2018 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2019 {
2020 	struct ixl_mac_filter	*f;
2021 
2022 	SLIST_FOREACH(f, &vsi->ftl, next) {
2023 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2024 		    && (f->vlan == vlan)) {
2025 			return (f);
2026 		}
2027 	}
2028 
2029 	return (NULL);
2030 }
2031 
2032 /*
2033 ** This routine takes additions to the vsi filter
2034 ** table and creates an Admin Queue call to create
2035 ** the filters in the hardware.
2036 */
2037 void
2038 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2039 {
2040 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2041 	struct ixl_mac_filter	*f;
2042 	struct ixl_pf		*pf;
2043 	struct i40e_hw		*hw;
2044 	device_t		dev;
2045 	enum i40e_status_code	status;
2046 	int			j = 0;
2047 
2048 	pf = vsi->back;
2049 	dev = vsi->dev;
2050 	hw = &pf->hw;
2051 
2052 	if (cnt < 1) {
2053 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt < 1\n");
2054 		return;
2055 	}
2056 
2057 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2058 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2059 	if (a == NULL) {
2060 		device_printf(dev, "add_hw_filters failed to get memory\n");
2061 		return;
2062 	}
2063 
2064 	/*
2065 	** Scan the filter list, each time we find one
2066 	** we add it to the admin queue array and turn off
2067 	** the add bit.
2068 	*/
2069 	SLIST_FOREACH(f, &vsi->ftl, next) {
2070 		if ((f->flags & flags) == flags) {
2071 			b = &a[j]; // a pox on fvl long names :)
2072 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2073 			if (f->vlan == IXL_VLAN_ANY) {
2074 				b->vlan_tag = 0;
2075 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2076 			} else {
2077 				b->vlan_tag = f->vlan;
2078 				b->flags = 0;
2079 			}
2080 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2081 			f->flags &= ~IXL_FILTER_ADD;
2082 			j++;
2083 
2084 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2085 			    MAC_FORMAT_ARGS(f->macaddr));
2086 		}
2087 		if (j == cnt)
2088 			break;
2089 	}
2090 	if (j > 0) {
2091 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2092 		if (status)
2093 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2094 			    "error %s\n", i40e_stat_str(hw, status),
2095 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2096 		else
2097 			vsi->hw_filters_add += j;
2098 	}
2099 	free(a, M_DEVBUF);
2100 	return;
2101 }
2102 
2103 /*
2104 ** This routine takes removals in the vsi filter
2105 ** table and creates an Admin Queue call to delete
2106 ** the filters in the hardware.
2107 */
2108 void
2109 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2110 {
2111 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2112 	struct ixl_pf		*pf;
2113 	struct i40e_hw		*hw;
2114 	device_t		dev;
2115 	struct ixl_mac_filter	*f, *f_temp;
2116 	enum i40e_status_code	status;
2117 	int			j = 0;
2118 
2119 	pf = vsi->back;
2120 	hw = &pf->hw;
2121 	dev = vsi->dev;
2122 
2123 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2124 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2125 	if (d == NULL) {
2126 		device_printf(dev, "%s: failed to get memory\n", __func__);
2127 		return;
2128 	}
2129 
2130 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2131 		if (f->flags & IXL_FILTER_DEL) {
2132 			e = &d[j]; // a pox on fvl long names :)
2133 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2134 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2135 			if (f->vlan == IXL_VLAN_ANY) {
2136 				e->vlan_tag = 0;
2137 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2138 			} else {
2139 				e->vlan_tag = f->vlan;
2140 			}
2141 
2142 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2143 			    MAC_FORMAT_ARGS(f->macaddr));
2144 
2145 			/* delete entry from vsi list */
2146 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2147 			free(f, M_DEVBUF);
2148 			j++;
2149 		}
2150 		if (j == cnt)
2151 			break;
2152 	}
2153 	if (j > 0) {
2154 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2155 		if (status) {
2156 			int sc = 0;
2157 			for (int i = 0; i < j; i++)
2158 				sc += (!d[i].error_code);
2159 			vsi->hw_filters_del += sc;
2160 			device_printf(dev,
2161 			    "Failed to remove %d/%d filters, error %s\n",
2162 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2163 		} else
2164 			vsi->hw_filters_del += j;
2165 	}
2166 	free(d, M_DEVBUF);
2167 	return;
2168 }
2169 
2170 int
2171 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2172 {
2173 	struct i40e_hw	*hw = &pf->hw;
2174 	int		error = 0;
2175 	u32		reg;
2176 	u16		pf_qidx;
2177 
2178 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2179 
2180 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2181 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2182 	    pf_qidx, vsi_qidx);
2183 
2184 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2185 
2186 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2187 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2188 	    I40E_QTX_ENA_QENA_STAT_MASK;
2189 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2190 	/* Verify the enable took */
2191 	for (int j = 0; j < 10; j++) {
2192 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2193 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2194 			break;
2195 		i40e_usec_delay(10);
2196 	}
2197 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2198 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2199 		    pf_qidx);
2200 		error = ETIMEDOUT;
2201 	}
2202 
2203 	return (error);
2204 }
2205 
2206 int
2207 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2208 {
2209 	struct i40e_hw	*hw = &pf->hw;
2210 	int		error = 0;
2211 	u32		reg;
2212 	u16		pf_qidx;
2213 
2214 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2215 
2216 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2217 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2218 	    pf_qidx, vsi_qidx);
2219 
2220 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2221 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2222 	    I40E_QRX_ENA_QENA_STAT_MASK;
2223 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2224 	/* Verify the enable took */
2225 	for (int j = 0; j < 10; j++) {
2226 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2227 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2228 			break;
2229 		i40e_usec_delay(10);
2230 	}
2231 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2232 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2233 		    pf_qidx);
2234 		error = ETIMEDOUT;
2235 	}
2236 
2237 	return (error);
2238 }
2239 
2240 int
2241 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2242 {
2243 	int error = 0;
2244 
2245 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2246 	/* Called function already prints error message */
2247 	if (error)
2248 		return (error);
2249 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2250 	return (error);
2251 }
2252 
2253 /* For PF VSI only */
2254 int
2255 ixl_enable_rings(struct ixl_vsi *vsi)
2256 {
2257 	struct ixl_pf	*pf = vsi->back;
2258 	int		error = 0;
2259 
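	/* Note: on failure, only the error from the last ring is returned */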
2260 	for (int i = 0; i < vsi->num_tx_queues; i++)
2261 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2262 
2263 	for (int i = 0; i < vsi->num_rx_queues; i++)
2264 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2265 
2266 	return (error);
2267 }
2268 
2269 /*
2270  * Returns ETIMEDOUT if the TX ring fails to disable within the timeout.
2271  */
2272 int
2273 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2274 {
2275 	struct i40e_hw	*hw = &pf->hw;
2276 	int		error = 0;
2277 	u32		reg;
2278 	u16		pf_qidx;
2279 
2280 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2281 
2282 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2283 	i40e_usec_delay(500);
2284 
2285 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2286 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2287 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2288 	/* Verify the disable took */
2289 	for (int j = 0; j < 10; j++) {
2290 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2291 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2292 			break;
2293 		i40e_msec_delay(10);
2294 	}
2295 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2296 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2297 		    pf_qidx);
2298 		error = ETIMEDOUT;
2299 	}
2300 
2301 	return (error);
2302 }
2303 
2304 /*
2305  * Returns ETIMEDOUT if the RX ring fails to disable within the timeout.
2306  */
2307 int
2308 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2309 {
2310 	struct i40e_hw	*hw = &pf->hw;
2311 	int		error = 0;
2312 	u32		reg;
2313 	u16		pf_qidx;
2314 
2315 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2316 
2317 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2318 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2319 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2320 	/* Verify the disable took */
2321 	for (int j = 0; j < 10; j++) {
2322 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2323 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2324 			break;
2325 		i40e_msec_delay(10);
2326 	}
2327 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2328 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2329 		    pf_qidx);
2330 		error = ETIMEDOUT;
2331 	}
2332 
2333 	return (error);
2334 }
2335 
2336 int
2337 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2338 {
2339 	int error = 0;
2340 
2341 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2342 	/* Called function already prints error message */
2343 	if (error)
2344 		return (error);
2345 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2346 	return (error);
2347 }
2348 
2349 int
2350 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2351 {
2352 	int error = 0;
2353 
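	/* Note: on failure, only the error from the last ring is returned */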
2354 	for (int i = 0; i < vsi->num_tx_queues; i++)
2355 		error = ixl_disable_tx_ring(pf, qtag, i);
2356 
2357 	for (int i = 0; i < vsi->num_rx_queues; i++)
2358 		error = ixl_disable_rx_ring(pf, qtag, i);
2359 
2360 	return (error);
2361 }
2362 
2363 static void
2364 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2365 {
2366 	struct i40e_hw *hw = &pf->hw;
2367 	device_t dev = pf->dev;
2368 	struct ixl_vf *vf;
2369 	bool mdd_detected = false;
2370 	bool pf_mdd_detected = false;
2371 	bool vf_mdd_detected = false;
2372 	u16 vf_num, queue;
2373 	u8 pf_num, event;
2374 	u8 pf_mdet_num, vp_mdet_num;
2375 	u32 reg;
2376 
2377 	/* find what triggered the MDD event */
2378 	reg = rd32(hw, I40E_GL_MDET_TX);
2379 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2380 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2381 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2382 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2383 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2384 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2385 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2386 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2387 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2388 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2389 		mdd_detected = true;
2390 	}
2391 
2392 	if (!mdd_detected)
2393 		return;
2394 
2395 	reg = rd32(hw, I40E_PF_MDET_TX);
2396 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2397 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2398 		pf_mdet_num = hw->pf_id;
2399 		pf_mdd_detected = true;
2400 	}
2401 
2402 	/* Check if MDD was caused by a VF */
2403 	for (int i = 0; i < pf->num_vfs; i++) {
2404 		vf = &(pf->vfs[i]);
2405 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2406 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2407 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2408 			vp_mdet_num = i;
2409 			vf->num_mdd_events++;
2410 			vf_mdd_detected = true;
2411 		}
2412 	}
2413 
2414 	/* Print out an error message */
2415 	if (vf_mdd_detected && pf_mdd_detected)
2416 		device_printf(dev,
2417 		    "Malicious Driver Detection event %d"
2418 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2419 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2420 	else if (vf_mdd_detected && !pf_mdd_detected)
2421 		device_printf(dev,
2422 		    "Malicious Driver Detection event %d"
2423 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2424 		    event, queue, pf_num, vf_num, vp_mdet_num);
2425 	else if (!vf_mdd_detected && pf_mdd_detected)
2426 		device_printf(dev,
2427 		    "Malicious Driver Detection event %d"
2428 		    " on TX queue %d, pf number %d (PF-%d)\n",
2429 		    event, queue, pf_num, pf_mdet_num);
2430 	/* Theoretically shouldn't happen */
2431 	else
2432 		device_printf(dev,
2433 		    "TX Malicious Driver Detection event (unknown)\n");
2434 }
2435 
2436 static void
2437 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2438 {
2439 	struct i40e_hw *hw = &pf->hw;
2440 	device_t dev = pf->dev;
2441 	struct ixl_vf *vf;
2442 	bool mdd_detected = false;
2443 	bool pf_mdd_detected = false;
2444 	bool vf_mdd_detected = false;
2445 	u16 queue;
2446 	u8 pf_num, event;
2447 	u8 pf_mdet_num, vp_mdet_num;
2448 	u32 reg;
2449 
2450 	/*
2451 	 * GL_MDET_RX doesn't contain VF number information, unlike
2452 	 * GL_MDET_TX.
2453 	 */
2454 	reg = rd32(hw, I40E_GL_MDET_RX);
2455 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2456 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2457 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2458 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2459 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2460 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2461 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2462 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2463 		mdd_detected = true;
2464 	}
2465 
2466 	if (!mdd_detected)
2467 		return;
2468 
2469 	reg = rd32(hw, I40E_PF_MDET_RX);
2470 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2471 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2472 		pf_mdet_num = hw->pf_id;
2473 		pf_mdd_detected = true;
2474 	}
2475 
2476 	/* Check if MDD was caused by a VF */
2477 	for (int i = 0; i < pf->num_vfs; i++) {
2478 		vf = &(pf->vfs[i]);
2479 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2480 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2481 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2482 			vp_mdet_num = i;
2483 			vf->num_mdd_events++;
2484 			vf_mdd_detected = true;
2485 		}
2486 	}
2487 
2488 	/* Print out an error message */
2489 	if (vf_mdd_detected && pf_mdd_detected)
2490 		device_printf(dev,
2491 		    "Malicious Driver Detection event %d"
2492 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2493 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2494 	else if (vf_mdd_detected && !pf_mdd_detected)
2495 		device_printf(dev,
2496 		    "Malicious Driver Detection event %d"
2497 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2498 		    event, queue, pf_num, vp_mdet_num);
2499 	else if (!vf_mdd_detected && pf_mdd_detected)
2500 		device_printf(dev,
2501 		    "Malicious Driver Detection event %d"
2502 		    " on RX queue %d, pf number %d (PF-%d)\n",
2503 		    event, queue, pf_num, pf_mdet_num);
2504 	/* Theoretically shouldn't happen */
2505 	else
2506 		device_printf(dev,
2507 		    "RX Malicious Driver Detection event (unknown)\n");
2508 }
2509 
2510 /**
2511  * ixl_handle_mdd_event
2512  *
2513  * Called from the interrupt handler to identify possibly malicious VFs
2514  * (it also detects MDD events caused by the PF itself)
2515  **/
2516 void
2517 ixl_handle_mdd_event(struct ixl_pf *pf)
2518 {
2519 	struct i40e_hw *hw = &pf->hw;
2520 	u32 reg;
2521 
2522 	/*
2523 	 * Handle both TX/RX because it's possible they could
2524 	 * both trigger in the same interrupt.
2525 	 */
2526 	ixl_handle_tx_mdd_event(pf);
2527 	ixl_handle_rx_mdd_event(pf);
2528 
2529 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2530 
2531 	/* re-enable mdd interrupt cause */
2532 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2533 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2534 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2535 	ixl_flush(hw);
2536 }
2537 
2538 void
2539 ixl_enable_intr(struct ixl_vsi *vsi)
2540 {
2541 	struct i40e_hw		*hw = vsi->hw;
2542 	struct ixl_rx_queue	*que = vsi->rx_queues;
2543 
2544 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2545 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2546 			ixl_enable_queue(hw, que->rxr.me);
2547 	} else
2548 		ixl_enable_intr0(hw);
2549 }
2550 
2551 void
2552 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2553 {
2554 	struct i40e_hw		*hw = vsi->hw;
2555 	struct ixl_rx_queue	*que = vsi->rx_queues;
2556 
2557 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2558 		ixl_disable_queue(hw, que->rxr.me);
2559 }
2560 
2561 void
2562 ixl_enable_intr0(struct i40e_hw *hw)
2563 {
2564 	u32		reg;
2565 
2566 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2567 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2568 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2569 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2570 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2571 }
2572 
2573 void
2574 ixl_disable_intr0(struct i40e_hw *hw)
2575 {
2576 	u32		reg;
2577 
2578 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2579 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2580 	ixl_flush(hw);
2581 }
2582 
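/*
 * Enable/disable a single queue's interrupt via its PFINT_DYN_CTLN
 * register.
 */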
2583 void
2584 ixl_enable_queue(struct i40e_hw *hw, int id)
2585 {
2586 	u32		reg;
2587 
2588 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2589 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2590 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2591 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2592 }
2593 
2594 void
2595 ixl_disable_queue(struct i40e_hw *hw, int id)
2596 {
2597 	u32		reg;
2598 
2599 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2600 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2601 }
2602 
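/*
 * Read the port-level hardware statistics for the PF, then refresh the
 * PF VSI stats and the stats of any enabled VF VSIs.
 */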
2603 void
2604 ixl_update_stats_counters(struct ixl_pf *pf)
2605 {
2606 	struct i40e_hw	*hw = &pf->hw;
2607 	struct ixl_vsi	*vsi = &pf->vsi;
2608 	struct ixl_vf	*vf;
2609 
2610 	struct i40e_hw_port_stats *nsd = &pf->stats;
2611 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2612 
2613 	/* Update hw stats */
2614 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2615 			   pf->stat_offsets_loaded,
2616 			   &osd->crc_errors, &nsd->crc_errors);
2617 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2618 			   pf->stat_offsets_loaded,
2619 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2620 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2621 			   I40E_GLPRT_GORCL(hw->port),
2622 			   pf->stat_offsets_loaded,
2623 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2624 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2625 			   I40E_GLPRT_GOTCL(hw->port),
2626 			   pf->stat_offsets_loaded,
2627 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2628 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2629 			   pf->stat_offsets_loaded,
2630 			   &osd->eth.rx_discards,
2631 			   &nsd->eth.rx_discards);
2632 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2633 			   I40E_GLPRT_UPRCL(hw->port),
2634 			   pf->stat_offsets_loaded,
2635 			   &osd->eth.rx_unicast,
2636 			   &nsd->eth.rx_unicast);
2637 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2638 			   I40E_GLPRT_UPTCL(hw->port),
2639 			   pf->stat_offsets_loaded,
2640 			   &osd->eth.tx_unicast,
2641 			   &nsd->eth.tx_unicast);
2642 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2643 			   I40E_GLPRT_MPRCL(hw->port),
2644 			   pf->stat_offsets_loaded,
2645 			   &osd->eth.rx_multicast,
2646 			   &nsd->eth.rx_multicast);
2647 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2648 			   I40E_GLPRT_MPTCL(hw->port),
2649 			   pf->stat_offsets_loaded,
2650 			   &osd->eth.tx_multicast,
2651 			   &nsd->eth.tx_multicast);
2652 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2653 			   I40E_GLPRT_BPRCL(hw->port),
2654 			   pf->stat_offsets_loaded,
2655 			   &osd->eth.rx_broadcast,
2656 			   &nsd->eth.rx_broadcast);
2657 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2658 			   I40E_GLPRT_BPTCL(hw->port),
2659 			   pf->stat_offsets_loaded,
2660 			   &osd->eth.tx_broadcast,
2661 			   &nsd->eth.tx_broadcast);
2662 
2663 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2664 			   pf->stat_offsets_loaded,
2665 			   &osd->tx_dropped_link_down,
2666 			   &nsd->tx_dropped_link_down);
2667 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2668 			   pf->stat_offsets_loaded,
2669 			   &osd->mac_local_faults,
2670 			   &nsd->mac_local_faults);
2671 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2672 			   pf->stat_offsets_loaded,
2673 			   &osd->mac_remote_faults,
2674 			   &nsd->mac_remote_faults);
2675 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2676 			   pf->stat_offsets_loaded,
2677 			   &osd->rx_length_errors,
2678 			   &nsd->rx_length_errors);
2679 
2680 	/* Flow control (LFC) stats */
2681 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2682 			   pf->stat_offsets_loaded,
2683 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2684 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2685 			   pf->stat_offsets_loaded,
2686 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2687 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2688 			   pf->stat_offsets_loaded,
2689 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2690 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2691 			   pf->stat_offsets_loaded,
2692 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2693 
2694 	/* Packet size stats rx */
2695 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2696 			   I40E_GLPRT_PRC64L(hw->port),
2697 			   pf->stat_offsets_loaded,
2698 			   &osd->rx_size_64, &nsd->rx_size_64);
2699 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2700 			   I40E_GLPRT_PRC127L(hw->port),
2701 			   pf->stat_offsets_loaded,
2702 			   &osd->rx_size_127, &nsd->rx_size_127);
2703 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2704 			   I40E_GLPRT_PRC255L(hw->port),
2705 			   pf->stat_offsets_loaded,
2706 			   &osd->rx_size_255, &nsd->rx_size_255);
2707 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2708 			   I40E_GLPRT_PRC511L(hw->port),
2709 			   pf->stat_offsets_loaded,
2710 			   &osd->rx_size_511, &nsd->rx_size_511);
2711 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2712 			   I40E_GLPRT_PRC1023L(hw->port),
2713 			   pf->stat_offsets_loaded,
2714 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2715 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2716 			   I40E_GLPRT_PRC1522L(hw->port),
2717 			   pf->stat_offsets_loaded,
2718 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2719 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2720 			   I40E_GLPRT_PRC9522L(hw->port),
2721 			   pf->stat_offsets_loaded,
2722 			   &osd->rx_size_big, &nsd->rx_size_big);
2723 
2724 	/* Packet size stats tx */
2725 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2726 			   I40E_GLPRT_PTC64L(hw->port),
2727 			   pf->stat_offsets_loaded,
2728 			   &osd->tx_size_64, &nsd->tx_size_64);
2729 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2730 			   I40E_GLPRT_PTC127L(hw->port),
2731 			   pf->stat_offsets_loaded,
2732 			   &osd->tx_size_127, &nsd->tx_size_127);
2733 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2734 			   I40E_GLPRT_PTC255L(hw->port),
2735 			   pf->stat_offsets_loaded,
2736 			   &osd->tx_size_255, &nsd->tx_size_255);
2737 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2738 			   I40E_GLPRT_PTC511L(hw->port),
2739 			   pf->stat_offsets_loaded,
2740 			   &osd->tx_size_511, &nsd->tx_size_511);
2741 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2742 			   I40E_GLPRT_PTC1023L(hw->port),
2743 			   pf->stat_offsets_loaded,
2744 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2745 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2746 			   I40E_GLPRT_PTC1522L(hw->port),
2747 			   pf->stat_offsets_loaded,
2748 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2749 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2750 			   I40E_GLPRT_PTC9522L(hw->port),
2751 			   pf->stat_offsets_loaded,
2752 			   &osd->tx_size_big, &nsd->tx_size_big);
2753 
2754 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2755 			   pf->stat_offsets_loaded,
2756 			   &osd->rx_undersize, &nsd->rx_undersize);
2757 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2758 			   pf->stat_offsets_loaded,
2759 			   &osd->rx_fragments, &nsd->rx_fragments);
2760 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2761 			   pf->stat_offsets_loaded,
2762 			   &osd->rx_oversize, &nsd->rx_oversize);
2763 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2764 			   pf->stat_offsets_loaded,
2765 			   &osd->rx_jabber, &nsd->rx_jabber);
2766 	pf->stat_offsets_loaded = true;
2767 	/* End hw stats */
2768 
2769 	/* Update vsi stats */
2770 	ixl_update_vsi_stats(vsi);
2771 
2772 	for (int i = 0; i < pf->num_vfs; i++) {
2773 		vf = &pf->vfs[i];
2774 		if (vf->vf_flags & VF_FLAG_ENABLED)
2775 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2776 	}
2777 }
2778 
2779 int
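/*
 * Shut down the LAN HMC and the Admin Queue, disable the admin
 * interrupt, and release the PF's queue allocation, so the device can
 * safely undergo a reset.
 */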
2780 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2781 {
2782 	struct i40e_hw *hw = &pf->hw;
2783 	device_t dev = pf->dev;
2784 	int error = 0;
2785 
2786 	error = i40e_shutdown_lan_hmc(hw);
2787 	if (error)
2788 		device_printf(dev,
2789 		    "Shutdown LAN HMC failed with code %d\n", error);
2790 
2791 	ixl_disable_intr0(hw);
2792 
2793 	error = i40e_shutdown_adminq(hw);
2794 	if (error)
2795 		device_printf(dev,
2796 		    "Shutdown Admin queue failed with code %d\n", error);
2797 
2798 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2799 	return (error);
2800 }
2801 
2802 int
2803 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2804 {
2805 	struct i40e_hw *hw = &pf->hw;
2806 	struct ixl_vsi *vsi = &pf->vsi;
2807 	device_t dev = pf->dev;
2808 	int error = 0;
2809 
2810 	device_printf(dev, "Rebuilding driver state...\n");
2811 
2812 	error = i40e_pf_reset(hw);
2813 	if (error) {
2814 		device_printf(dev, "PF reset failure %s\n",
2815 		    i40e_stat_str(hw, error));
2816 		goto ixl_rebuild_hw_structs_after_reset_err;
2817 	}
2818 
2819 	/* Setup */
2820 	error = i40e_init_adminq(hw);
2821 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2822 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2823 		    error);
2824 		goto ixl_rebuild_hw_structs_after_reset_err;
2825 	}
2826 
2827 	i40e_clear_pxe_mode(hw);
2828 
2829 	error = ixl_get_hw_capabilities(pf);
2830 	if (error) {
2831 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2832 		goto ixl_rebuild_hw_structs_after_reset_err;
2833 	}
2834 
2835 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2836 	    hw->func_caps.num_rx_qp, 0, 0);
2837 	if (error) {
2838 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2839 		goto ixl_rebuild_hw_structs_after_reset_err;
2840 	}
2841 
2842 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2843 	if (error) {
2844 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2845 		goto ixl_rebuild_hw_structs_after_reset_err;
2846 	}
2847 
2848 	/* reserve a contiguous allocation for the PF's VSI */
2849 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2850 	if (error) {
2851 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2852 		    error);
2853 		/* TODO: error handling */
2854 	}
2855 
2856 	error = ixl_switch_config(pf);
2857 	if (error) {
2858 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2859 		     error);
2860 		error = EIO;
2861 		goto ixl_rebuild_hw_structs_after_reset_err;
2862 	}
2863 
2864 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2865 	    NULL);
2866 	if (error) {
2867 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2868 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2869 		error = EIO;
2870 		goto ixl_rebuild_hw_structs_after_reset_err;
2871 	}
2872 
2873 	u8 set_fc_err_mask;
2874 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2875 	if (error) {
2876 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2877 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2878 		error = EIO;
2879 		goto ixl_rebuild_hw_structs_after_reset_err;
2880 	}
2881 
2882 	/* Remove default filters reinstalled by FW on reset */
2883 	ixl_del_default_hw_filters(vsi);
2884 
2885 	/* Determine link state */
2886 	if (ixl_attach_get_link_status(pf)) {
2887 		error = EINVAL;
2888 		/* TODO: error handling */
2889 	}
2890 
2891 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2892 	ixl_get_fw_lldp_status(pf);
2893 
2894 	/* Keep admin queue interrupts active while driver is loaded */
2895 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2896 		ixl_configure_intr0_msix(pf);
2897 		ixl_enable_intr0(hw);
2898 	}
2899 
2900 	device_printf(dev, "Rebuilding driver state done.\n");
2901 	return (0);
2902 
2903 ixl_rebuild_hw_structs_after_reset_err:
2904 	device_printf(dev, "Reload the driver to recover\n");
2905 	return (error);
2906 }
2907 
2908 void
2909 ixl_handle_empr_reset(struct ixl_pf *pf)
2910 {
2911 	struct ixl_vsi	*vsi = &pf->vsi;
2912 	struct i40e_hw	*hw = &pf->hw;
2913 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2914 	int count = 0;
2915 	u32 reg;
2916 
2917 	ixl_prepare_for_reset(pf, is_up);
2918 
2919 	/* Typically finishes within 3-4 seconds */
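	/* (the loop below allows up to 100 x 100 ms = a 10 second ceiling) */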
2920 	while (count++ < 100) {
2921 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2922 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2923 		if (reg)
2924 			i40e_msec_delay(100);
2925 		else
2926 			break;
2927 	}
2928 	ixl_dbg(pf, IXL_DBG_INFO,
2929 			"Reset wait count: %d\n", count);
2930 
2931 	ixl_rebuild_hw_structs_after_reset(pf);
2932 
2933 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2934 }
2935 
2936 /**
2937  * Update VSI-specific ethernet statistics counters.
2938  **/
2939 void
2940 ixl_update_eth_stats(struct ixl_vsi *vsi)
2941 {
2942 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2943 	struct i40e_hw *hw = &pf->hw;
2944 	struct i40e_eth_stats *es;
2945 	struct i40e_eth_stats *oes;
2946 	struct i40e_hw_port_stats *nsd;
2947 	u16 stat_idx = vsi->info.stat_counter_idx;
2948 
2949 	es = &vsi->eth_stats;
2950 	oes = &vsi->eth_stats_offsets;
2951 	nsd = &pf->stats;
2952 
2953 	/* Gather up the stats that the hw collects */
2954 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2955 			   vsi->stat_offsets_loaded,
2956 			   &oes->tx_errors, &es->tx_errors);
2957 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2958 			   vsi->stat_offsets_loaded,
2959 			   &oes->rx_discards, &es->rx_discards);
2960 
2961 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2962 			   I40E_GLV_GORCL(stat_idx),
2963 			   vsi->stat_offsets_loaded,
2964 			   &oes->rx_bytes, &es->rx_bytes);
2965 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2966 			   I40E_GLV_UPRCL(stat_idx),
2967 			   vsi->stat_offsets_loaded,
2968 			   &oes->rx_unicast, &es->rx_unicast);
2969 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2970 			   I40E_GLV_MPRCL(stat_idx),
2971 			   vsi->stat_offsets_loaded,
2972 			   &oes->rx_multicast, &es->rx_multicast);
2973 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2974 			   I40E_GLV_BPRCL(stat_idx),
2975 			   vsi->stat_offsets_loaded,
2976 			   &oes->rx_broadcast, &es->rx_broadcast);
2977 
2978 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2979 			   I40E_GLV_GOTCL(stat_idx),
2980 			   vsi->stat_offsets_loaded,
2981 			   &oes->tx_bytes, &es->tx_bytes);
2982 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2983 			   I40E_GLV_UPTCL(stat_idx),
2984 			   vsi->stat_offsets_loaded,
2985 			   &oes->tx_unicast, &es->tx_unicast);
2986 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2987 			   I40E_GLV_MPTCL(stat_idx),
2988 			   vsi->stat_offsets_loaded,
2989 			   &oes->tx_multicast, &es->tx_multicast);
2990 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2991 			   I40E_GLV_BPTCL(stat_idx),
2992 			   vsi->stat_offsets_loaded,
2993 			   &oes->tx_broadcast, &es->tx_broadcast);
2994 	vsi->stat_offsets_loaded = true;
2995 }
2996 
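/*
 * Refresh the VSI's ethernet stats and fold them, along with the
 * relevant port-level error counters, into the ifnet counters.
 */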
2997 void
2998 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2999 {
3000 	struct ixl_pf		*pf;
3001 	struct ifnet		*ifp;
3002 	struct i40e_eth_stats	*es;
3003 	u64			tx_discards;
3004 
3005 	struct i40e_hw_port_stats *nsd;
3006 
3007 	pf = vsi->back;
3008 	ifp = vsi->ifp;
3009 	es = &vsi->eth_stats;
3010 	nsd = &pf->stats;
3011 
3012 	ixl_update_eth_stats(vsi);
3013 
3014 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3015 
3016 	/* Update ifnet stats */
3017 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3018 	                   es->rx_multicast +
3019 			   es->rx_broadcast);
3020 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3021 	                   es->tx_multicast +
3022 			   es->tx_broadcast);
3023 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3024 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3025 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3026 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3027 
3028 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3029 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3030 	    nsd->rx_jabber);
3031 	IXL_SET_OERRORS(vsi, es->tx_errors);
3032 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3033 	IXL_SET_OQDROPS(vsi, tx_discards);
3034 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3035 	IXL_SET_COLLISIONS(vsi, 0);
3036 }
3037 
3038 /**
3039  * Reset all of the stats for the given pf
3040  **/
3041 void
3042 ixl_pf_reset_stats(struct ixl_pf *pf)
3043 {
3044 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3045 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3046 	pf->stat_offsets_loaded = false;
3047 }
3048 
3049 /**
3050  * Resets all stats of the given vsi
3051  **/
3052 void
3053 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3054 {
3055 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3056 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3057 	vsi->stat_offsets_loaded = false;
3058 }
3059 
3060 /**
3061  * Read and update a 48 bit stat from the hw
3062  *
3063  * Since the device stats are not reset at PFReset, they likely will not
3064  * be zeroed when the driver starts.  We'll save the first values read
3065  * and use them as offsets to be subtracted from the raw values in order
3066  * to report stats that count from zero.
3067  **/
3068 void
3069 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3070 	bool offset_loaded, u64 *offset, u64 *stat)
3071 {
3072 	u64 new_data;
3073 
3074 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3075 	new_data = rd64(hw, loreg);
3076 #else
3077 	/*
3078 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3079 	 * 10 don't support 64-bit bus reads/writes.
3080 	 */
3081 	new_data = rd32(hw, loreg);
3082 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3083 #endif
3084 
3085 	if (!offset_loaded)
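	/*
	 * The first read establishes the baseline; later reads subtract
	 * it, with 48-bit counter wrap handled below.
	 */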
3086 		*offset = new_data;
3087 	if (new_data >= *offset)
3088 		*stat = new_data - *offset;
3089 	else
3090 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3091 	*stat &= 0xFFFFFFFFFFFFULL;
3092 }
3093 
3094 /**
3095  * Read and update a 32 bit stat from the hw
3096  **/
3097 void
3098 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3099 	bool offset_loaded, u64 *offset, u64 *stat)
3100 {
3101 	u32 new_data;
3102 
3103 	new_data = rd32(hw, reg);
3104 	if (!offset_loaded)
3105 		*offset = new_data;
3106 	if (new_data >= *offset)
3107 		*stat = (u32)(new_data - *offset);
3108 	else
3109 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3110 }
3111 
3112 void
3113 ixl_add_device_sysctls(struct ixl_pf *pf)
3114 {
3115 	device_t dev = pf->dev;
3116 	struct i40e_hw *hw = &pf->hw;
3117 
3118 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3119 	struct sysctl_oid_list *ctx_list =
3120 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3121 
3122 	struct sysctl_oid *debug_node;
3123 	struct sysctl_oid_list *debug_list;
3124 
3125 	struct sysctl_oid *fec_node;
3126 	struct sysctl_oid_list *fec_list;
3127 
3128 	/* Set up sysctls */
3129 	SYSCTL_ADD_PROC(ctx, ctx_list,
3130 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3131 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
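	/* e.g. "sysctl dev.ixl.0.fc=3" would request full flow control */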
3132 
3133 	SYSCTL_ADD_PROC(ctx, ctx_list,
3134 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3135 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
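	/* e.g. "sysctl dev.ixl.0.advertise_speed=0x24" would advertise 10G and 40G */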
3136 
3137 	SYSCTL_ADD_PROC(ctx, ctx_list,
3138 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3139 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3140 
3141 	SYSCTL_ADD_PROC(ctx, ctx_list,
3142 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3143 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3144 
3145 	SYSCTL_ADD_PROC(ctx, ctx_list,
3146 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3147 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3148 
3149 	SYSCTL_ADD_PROC(ctx, ctx_list,
3150 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3151 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3152 	    "Queues not allocated to a PF or VF");
3153 
3154 	SYSCTL_ADD_PROC(ctx, ctx_list,
3155 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3156 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3157 	    "Immediately set TX ITR value for all queues");
3158 
3159 	SYSCTL_ADD_PROC(ctx, ctx_list,
3160 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3161 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3162 	    "Immediately set RX ITR value for all queues");
3163 
3164 	SYSCTL_ADD_INT(ctx, ctx_list,
3165 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3166 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3167 
3168 	SYSCTL_ADD_INT(ctx, ctx_list,
3169 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3170 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3171 
3172 	/* Add FEC sysctls for 25G adapters */
3173 	if (i40e_is_25G_device(hw->device_id)) {
3174 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3175 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3176 		fec_list = SYSCTL_CHILDREN(fec_node);
3177 
3178 		SYSCTL_ADD_PROC(ctx, fec_list,
3179 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3180 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3181 
3182 		SYSCTL_ADD_PROC(ctx, fec_list,
3183 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3184 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3185 
3186 		SYSCTL_ADD_PROC(ctx, fec_list,
3187 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3188 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3189 
3190 		SYSCTL_ADD_PROC(ctx, fec_list,
3191 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3192 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3193 
3194 		SYSCTL_ADD_PROC(ctx, fec_list,
3195 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3196 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3197 	}
3198 
3199 	SYSCTL_ADD_PROC(ctx, ctx_list,
3200 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3201 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3202 
3203 	/* Add sysctls meant to print debug information, but don't list them
3204 	 * in "sysctl -a" output. */
3205 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3206 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3207 	debug_list = SYSCTL_CHILDREN(debug_node);
3208 
3209 	SYSCTL_ADD_UINT(ctx, debug_list,
3210 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3211 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3212 
3213 	SYSCTL_ADD_UINT(ctx, debug_list,
3214 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3215 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3216 
3217 	SYSCTL_ADD_PROC(ctx, debug_list,
3218 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3219 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3220 
3221 	SYSCTL_ADD_PROC(ctx, debug_list,
3222 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3223 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3224 
3225 	SYSCTL_ADD_PROC(ctx, debug_list,
3226 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3227 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3228 
3229 	SYSCTL_ADD_PROC(ctx, debug_list,
3230 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3231 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3232 
3233 	SYSCTL_ADD_PROC(ctx, debug_list,
3234 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3235 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3236 
3237 	SYSCTL_ADD_PROC(ctx, debug_list,
3238 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3239 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3240 
3241 	SYSCTL_ADD_PROC(ctx, debug_list,
3242 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3243 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3244 
3245 	SYSCTL_ADD_PROC(ctx, debug_list,
3246 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3247 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3248 
3249 	SYSCTL_ADD_PROC(ctx, debug_list,
3250 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3251 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3252 
3253 	SYSCTL_ADD_PROC(ctx, debug_list,
3254 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3255 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3256 
3257 	SYSCTL_ADD_PROC(ctx, debug_list,
3258 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3259 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3260 
3261 	SYSCTL_ADD_PROC(ctx, debug_list,
3262 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3263 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3264 
3265 	SYSCTL_ADD_PROC(ctx, debug_list,
3266 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3267 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3268 
3269 	SYSCTL_ADD_PROC(ctx, debug_list,
3270 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3271 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3272 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3273 
3274 	SYSCTL_ADD_PROC(ctx, debug_list,
3275 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3276 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3277 
3278 	if (pf->has_i2c) {
3279 		SYSCTL_ADD_PROC(ctx, debug_list,
3280 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3281 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3282 
3283 		SYSCTL_ADD_PROC(ctx, debug_list,
3284 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3285 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3286 
3287 		SYSCTL_ADD_PROC(ctx, debug_list,
3288 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3289 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3290 	}
3291 }
3292 
3293 /*
3294  * Primarily for finding out how many queues can be assigned to VFs,
3295  * at runtime.
3296  */
3297 static int
3298 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3299 {
3300 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3301 	int queues;
3302 
3303 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3304 
3305 	return sysctl_handle_int(oidp, NULL, queues, req);
3306 }
3307 
3308 /*
3309 ** Set flow control using sysctl:
3310 ** 	0 - off
3311 **	1 - rx pause
3312 **	2 - tx pause
3313 **	3 - full
3314 */
3315 int
3316 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3317 {
3318 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3319 	struct i40e_hw *hw = &pf->hw;
3320 	device_t dev = pf->dev;
3321 	int requested_fc, error = 0;
3322 	enum i40e_status_code aq_error = 0;
3323 	u8 fc_aq_err = 0;
3324 
3325 	/* Get request */
3326 	requested_fc = pf->fc;
3327 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3328 	if ((error) || (req->newptr == NULL))
3329 		return (error);
3330 	if (requested_fc < 0 || requested_fc > 3) {
3331 		device_printf(dev,
3332 		    "Invalid fc mode; valid modes are 0 through 3\n");
3333 		return (EINVAL);
3334 	}
3335 
3336 	/* Set fc ability for port */
3337 	hw->fc.requested_mode = requested_fc;
3338 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3339 	if (aq_error) {
3340 		device_printf(dev,
3341 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3342 		    __func__, aq_error, fc_aq_err);
3343 		return (EIO);
3344 	}
3345 	pf->fc = requested_fc;
3346 
3347 	return (0);
3348 }
3349 
3350 char *
3351 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3352 {
3353 	int index;
3354 
3355 	char *speeds[] = {
3356 		"Unknown",
3357 		"100 Mbps",
3358 		"1 Gbps",
3359 		"10 Gbps",
3360 		"40 Gbps",
3361 		"20 Gbps",
3362 		"25 Gbps",
3363 	};
3364 
3365 	switch (link_speed) {
3366 	case I40E_LINK_SPEED_100MB:
3367 		index = 1;
3368 		break;
3369 	case I40E_LINK_SPEED_1GB:
3370 		index = 2;
3371 		break;
3372 	case I40E_LINK_SPEED_10GB:
3373 		index = 3;
3374 		break;
3375 	case I40E_LINK_SPEED_40GB:
3376 		index = 4;
3377 		break;
3378 	case I40E_LINK_SPEED_20GB:
3379 		index = 5;
3380 		break;
3381 	case I40E_LINK_SPEED_25GB:
3382 		index = 6;
3383 		break;
3384 	case I40E_LINK_SPEED_UNKNOWN:
3385 	default:
3386 		index = 0;
3387 		break;
3388 	}
3389 
3390 	return speeds[index];
3391 }
3392 
3393 int
3394 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3395 {
3396 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3397 	struct i40e_hw *hw = &pf->hw;
3398 	int error = 0;
3399 
3400 	ixl_update_link_status(pf);
3401 
3402 	error = sysctl_handle_string(oidp,
3403 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3404 	    8, req);
3405 	return (error);
3406 }
3407 
3408 /*
3409  * Converts 8-bit speeds value to and from sysctl flags and
3410  * Admin Queue flags.
3411  */
3412 static u8
3413 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3414 {
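	/* Each entry: AQ link-speed flag in the low byte, the
	 * corresponding sysctl flag in the high byte. */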
3415 	static u16 speedmap[6] = {
3416 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3417 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3418 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3419 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3420 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3421 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3422 	};
3423 	u8 retval = 0;
3424 
3425 	for (int i = 0; i < 6; i++) {
3426 		if (to_aq)
3427 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3428 		else
3429 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3430 	}
3431 
3432 	return (retval);
3433 }
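/*
 * A worked example of the speedmap packing above: each u16 pairs an AQ
 * flag (low byte) with its sysctl flag (high byte), e.g.
 * I40E_LINK_SPEED_10GB with 0x4.  With to_aq == true, a sysctl value of
 * 0x6 (1G | 10G) becomes (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB);
 * with to_aq == false the mapping runs in reverse.
 */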
3434 
3435 int
3436 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3437 {
3438 	struct i40e_hw *hw = &pf->hw;
3439 	device_t dev = pf->dev;
3440 	struct i40e_aq_get_phy_abilities_resp abilities;
3441 	struct i40e_aq_set_phy_config config;
3442 	enum i40e_status_code aq_error = 0;
3443 
3444 	/* Get current capability information */
3445 	aq_error = i40e_aq_get_phy_capabilities(hw,
3446 	    FALSE, FALSE, &abilities, NULL);
3447 	if (aq_error) {
3448 		device_printf(dev,
3449 		    "%s: Error getting phy capabilities %d,"
3450 		    " aq error: %d\n", __func__, aq_error,
3451 		    hw->aq.asq_last_status);
3452 		return (EIO);
3453 	}
3454 
3455 	/* Prepare new config */
3456 	bzero(&config, sizeof(config));
3457 	if (from_aq)
3458 		config.link_speed = speeds;
3459 	else
3460 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3461 	config.phy_type = abilities.phy_type;
3462 	config.phy_type_ext = abilities.phy_type_ext;
3463 	config.abilities = abilities.abilities
3464 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3465 	config.eee_capability = abilities.eee_capability;
3466 	config.eeer = abilities.eeer_val;
3467 	config.low_power_ctrl = abilities.d3_lpan;
3468 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3469 
3470 	/* Do aq command & restart link */
3471 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3472 	if (aq_error) {
3473 		device_printf(dev,
3474 		    "%s: Error setting new phy config %d,"
3475 		    " aq error: %d\n", __func__, aq_error,
3476 		    hw->aq.asq_last_status);
3477 		return (EIO);
3478 	}
3479 
3480 	return (0);
3481 }
3482 
3483 /*
3484 ** Supported link speeds:
3485 **	Flags:
3486 **	 0x1 - 100 Mb
3487 **	 0x2 - 1G
3488 **	 0x4 - 10G
3489 **	 0x8 - 20G
3490 **	0x10 - 25G
3491 **	0x20 - 40G
3492 */
3493 static int
3494 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3495 {
3496 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3497 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3498 
3499 	return sysctl_handle_int(oidp, NULL, supported, req);
3500 }
3501 
3502 /*
3503 ** Control link advertise speed:
3504 **	Flags:
3505 **	 0x1 - advertise 100 Mb
3506 **	 0x2 - advertise 1G
3507 **	 0x4 - advertise 10G
3508 **	 0x8 - advertise 20G
3509 **	0x10 - advertise 25G
3510 **	0x20 - advertise 40G
3511 **
3512 **	Set to 0 to disable link
3513 */
3514 int
3515 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3516 {
3517 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3518 	device_t dev = pf->dev;
3519 	u8 converted_speeds;
3520 	int requested_ls = 0;
3521 	int error = 0;
3522 
3523 	/* Read in new mode */
3524 	requested_ls = pf->advertised_speed;
3525 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3526 	if ((error) || (req->newptr == NULL))
3527 		return (error);
3528 
3529 	/* Error out if bits outside of possible flag range are set */
3530 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3531 		device_printf(dev, "Input advertised speed out of range; "
3532 		    "valid flags are: 0x%02x\n",
3533 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3534 		return (EINVAL);
3535 	}
3536 
3537 	/* Check if adapter supports input value */
3538 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3539 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3540 		device_printf(dev, "Invalid advertised speed; "
3541 		    "valid flags are: 0x%02x\n",
3542 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3543 		return (EINVAL);
3544 	}
3545 
3546 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3547 	if (error)
3548 		return (error);
3549 
3550 	pf->advertised_speed = requested_ls;
3551 	ixl_update_link_status(pf);
3552 	return (0);
3553 }
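/*
 * Example (assuming the handler is attached as "advertise_speed" under
 * the device's sysctl node): "sysctl dev.ixl.0.advertise_speed=0x6"
 * advertises 1G and 10G only, and writing 0 disables link, per the flag
 * table above.
 */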
3554 
3555 /*
3556 ** Get the width and transaction speed of
3557 ** the bus this adapter is plugged into.
3558 */
3559 void
3560 ixl_get_bus_info(struct ixl_pf *pf)
3561 {
3562 	struct i40e_hw *hw = &pf->hw;
3563 	device_t dev = pf->dev;
3564 	u16 link;
3565 	u32 offset, num_ports;
3566 	u64 max_speed;
3567 
3568 	/* Some devices don't use PCIE */
3569 	if (hw->mac.type == I40E_MAC_X722)
3570 		return;
3571 
3572 	/* Read PCI Express Capabilities Link Status Register */
3573 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3574 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3575 
3576 	/* Fill out hw struct with PCIE info */
3577 	i40e_set_pci_config_data(hw, link);
3578 
3579 	/* Use info to print out bandwidth messages */
3580 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3581 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3582 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3583 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3584 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3585 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3586 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3587 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3588 	    ("Unknown"));
3589 
3590 	/*
3591 	 * If adapter is in slot with maximum supported speed,
3592 	 * no warning message needs to be printed out.
3593 	 */
3594 	if (hw->bus.speed >= i40e_bus_speed_8000
3595 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3596 		return;
3597 
3598 	num_ports = bitcount32(hw->func_caps.valid_functions);
3599 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3600 
3601 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3602 		device_printf(dev, "PCI-Express bandwidth available"
3603 		    " for this device may be insufficient for"
3604 		    " optimal performance.\n");
3605 		device_printf(dev, "Please move the device to a different"
3606 		    " PCI-e link with more lanes and/or higher"
3607 		    " transfer rate.\n");
3608 	}
3609 }
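/*
 * Rough illustration of the check above: max_speed ends up in Mb/s and
 * hw->bus.speed in MT/s, so a single-port 40G device in a 2.5 GT/s x4
 * slot compares 40000 against 2500 * 4 = 10000 and prints both warnings.
 * The units are only approximately comparable; this is a heuristic, not
 * an exact throughput computation.
 */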
3610 
3611 static int
3612 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3613 {
3614 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3615 	struct i40e_hw	*hw = &pf->hw;
3616 	struct sbuf	*sbuf;
3617 
3618 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3619 	ixl_nvm_version_str(hw, sbuf);
3620 	sbuf_finish(sbuf);
3621 	sbuf_delete(sbuf);
3622 
3623 	return (0);
3624 }
3625 
3626 void
3627 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3628 {
3629 	if ((nvma->command == I40E_NVM_READ) &&
3630 	    ((nvma->config & 0xFF) == 0xF) &&
3631 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3632 	    (nvma->offset == 0) &&
3633 	    (nvma->data_size == 1)) {
3634 		/* "Get Driver Status" poll from the NVM update tool; too chatty to log */
3635 	}
3636 	else if (nvma->command == I40E_NVM_READ) {
3637 		/* Ordinary NVM reads are routine and intentionally not logged */
3638 	}
3639 	else {
3640 		switch (nvma->command) {
3641 		case 0xB:
3642 			device_printf(dev, "- command: I40E_NVM_READ\n");
3643 			break;
3644 		case 0xC:
3645 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3646 			break;
3647 		default:
3648 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3649 			break;
3650 		}
3651 
3652 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3653 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3654 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3655 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3656 	}
3657 }
3658 
3659 int
3660 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3661 {
3662 	struct i40e_hw *hw = &pf->hw;
3663 	struct i40e_nvm_access *nvma;
3664 	device_t dev = pf->dev;
3665 	enum i40e_status_code status = 0;
3666 	size_t nvma_size, ifd_len, exp_len;
3667 	int err, perrno;
3668 
3669 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3670 
3671 	/* Sanity checks */
3672 	nvma_size = sizeof(struct i40e_nvm_access);
3673 	ifd_len = ifd->ifd_len;
3674 
3675 	if (ifd_len < nvma_size ||
3676 	    ifd->ifd_data == NULL) {
3677 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3678 		    __func__);
3679 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3680 		    __func__, ifd_len, nvma_size);
3681 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3682 		    ifd->ifd_data);
3683 		return (EINVAL);
3684 	}
3685 
3686 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3687 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3688 	if (err) {
3689 		device_printf(dev, "%s: Cannot get request from user space\n",
3690 		    __func__);
3691 		free(nvma, M_DEVBUF);
3692 		return (err);
3693 	}
3694 
3695 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3696 		ixl_print_nvm_cmd(dev, nvma);
3697 
3698 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3699 		int count = 0;
3700 		while (count++ < 100) {
3701 			i40e_msec_delay(100);
3702 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3703 				break;
3704 		}
3705 	}
3706 
3707 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3708 		free(nvma, M_DEVBUF);
3709 		return (-EBUSY);
3710 	}
3711 
3712 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3713 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3714 		    __func__);
3715 		free(nvma, M_DEVBUF);
3716 		return (EINVAL);
3717 	}
3718 
3719 	/*
3720 	 * Older versions of the NVM update tool don't set ifd_len to the size
3721 	 * of the entire buffer passed to the ioctl. Check the data_size field
3722 	 * in the contained i40e_nvm_access struct and ensure everything is
3723 	 * copied in from userspace.
3724 	 */
3725 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3726 
3727 	if (ifd_len < exp_len) {
3728 		ifd_len = exp_len;
3729 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3730 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3731 		if (err) {
3732 			device_printf(dev, "%s: Cannot get request from user space\n",
3733 					__func__);
3734 			free(nvma, M_DEVBUF);
3735 			return (err);
3736 		}
3737 	}
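	/*
	 * Worked example of the fixup above, under the stated assumption
	 * that one payload byte lives inside struct i40e_nvm_access itself:
	 * a request with data_size == 4096 must deliver
	 * sizeof(struct i40e_nvm_access) + 4095 bytes in total, so a short
	 * ifd_len is grown to that size and the copyin is redone.
	 */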
3738 
3739 	// TODO: Might need a different lock here
3740 	// IXL_PF_LOCK(pf);
3741 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3742 	// IXL_PF_UNLOCK(pf);
3743 
3744 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3745 	free(nvma, M_DEVBUF);
3746 	if (err) {
3747 		device_printf(dev, "%s: Cannot return data to user space\n",
3748 				__func__);
3749 		return (err);
3750 	}
3751 
3752 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3753 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3754 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3755 		    i40e_stat_str(hw, status), perrno);
3756 
3757 	/*
3758 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3759 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3760 	 */
3761 	if (perrno == -EPERM)
3762 		return (-EACCES);
3763 	else
3764 		return (perrno);
3765 }
3766 
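/*
 * Scan the four MDIO/I2C select registers and return the index of the
 * first I2C interface whose PHY-port bitmap includes this port with I2C
 * enabled; -1 means no I2C interface is mapped to this port.
 */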
3767 int
3768 ixl_find_i2c_interface(struct ixl_pf *pf)
3769 {
3770 	struct i40e_hw *hw = &pf->hw;
3771 	bool i2c_en, port_matched;
3772 	u32 reg;
3773 
3774 	for (int i = 0; i < 4; i++) {
3775 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3776 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3777 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3778 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3779 		    & BIT(hw->port);
3780 		if (i2c_en && port_matched)
3781 			return (i);
3782 	}
3783 
3784 	return (-1);
3785 }
3786 
3787 static char *
3788 ixl_phy_type_string(u32 bit_pos, bool ext)
3789 {
3790 	static char * phy_types_str[32] = {
3791 		"SGMII",
3792 		"1000BASE-KX",
3793 		"10GBASE-KX4",
3794 		"10GBASE-KR",
3795 		"40GBASE-KR4",
3796 		"XAUI",
3797 		"XFI",
3798 		"SFI",
3799 		"XLAUI",
3800 		"XLPPI",
3801 		"40GBASE-CR4",
3802 		"10GBASE-CR1",
3803 		"SFP+ Active DA",
3804 		"QSFP+ Active DA",
3805 		"Reserved (14)",
3806 		"Reserved (15)",
3807 		"Reserved (16)",
3808 		"100BASE-TX",
3809 		"1000BASE-T",
3810 		"10GBASE-T",
3811 		"10GBASE-SR",
3812 		"10GBASE-LR",
3813 		"10GBASE-SFP+Cu",
3814 		"10GBASE-CR1",
3815 		"40GBASE-CR4",
3816 		"40GBASE-SR4",
3817 		"40GBASE-LR4",
3818 		"1000BASE-SX",
3819 		"1000BASE-LX",
3820 		"1000BASE-T Optical",
3821 		"20GBASE-KR2",
3822 		"Reserved (31)"
3823 	};
3824 	static char * ext_phy_types_str[8] = {
3825 		"25GBASE-KR",
3826 		"25GBASE-CR",
3827 		"25GBASE-SR",
3828 		"25GBASE-LR",
3829 		"25GBASE-AOC",
3830 		"25GBASE-ACC",
3831 		"Reserved (6)",
3832 		"Reserved (7)"
3833 	};
3834 
3835 	if (ext && bit_pos > 7) return "Invalid_Ext";
3836 	if (bit_pos > 31) return "Invalid";
3837 
3838 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3839 }
3840 
3841 /* TODO: ERJ: I don't think this is necessary anymore. */
3842 int
3843 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3844 {
3845 	device_t dev = pf->dev;
3846 	struct i40e_hw *hw = &pf->hw;
3847 	struct i40e_aq_desc desc;
3848 	enum i40e_status_code status;
3849 
3850 	struct i40e_aqc_get_link_status *aq_link_status =
3851 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3852 
3853 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3854 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3855 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3856 	if (status) {
3857 		device_printf(dev,
3858 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3859 		    __func__, i40e_stat_str(hw, status),
3860 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3861 		return (EIO);
3862 	}
3863 
3864 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3865 	return (0);
3866 }
3867 
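/*
 * Link-status PHY type values at or above 0x1F index the extended (25G)
 * table, so subtract that offset before the lookup.
 */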
3868 static char *
3869 ixl_phy_type_string_ls(u8 val)
3870 {
3871 	if (val >= 0x1F)
3872 		return ixl_phy_type_string(val - 0x1F, true);
3873 	else
3874 		return ixl_phy_type_string(val, false);
3875 }
3876 
3877 static int
3878 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3879 {
3880 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3881 	device_t dev = pf->dev;
3882 	struct sbuf *buf;
3883 	int error = 0;
3884 
3885 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3886 	if (!buf) {
3887 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3888 		return (ENOMEM);
3889 	}
3890 
3891 	struct i40e_aqc_get_link_status link_status;
3892 	error = ixl_aq_get_link_status(pf, &link_status);
3893 	if (error) {
3894 		sbuf_delete(buf);
3895 		return (error);
3896 	}
3897 
3898 	sbuf_printf(buf, "\n"
3899 	    "PHY Type : 0x%02x<%s>\n"
3900 	    "Speed    : 0x%02x\n"
3901 	    "Link info: 0x%02x\n"
3902 	    "AN info  : 0x%02x\n"
3903 	    "Ext info : 0x%02x\n"
3904 	    "Loopback : 0x%02x\n"
3905 	    "Max Frame: %d\n"
3906 	    "Config   : 0x%02x\n"
3907 	    "Power    : 0x%02x",
3908 	    link_status.phy_type,
3909 	    ixl_phy_type_string_ls(link_status.phy_type),
3910 	    link_status.link_speed,
3911 	    link_status.link_info,
3912 	    link_status.an_info,
3913 	    link_status.ext_info,
3914 	    link_status.loopback,
3915 	    link_status.max_frame_size,
3916 	    link_status.config,
3917 	    link_status.power_desc);
3918 
3919 	error = sbuf_finish(buf);
3920 	if (error)
3921 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3922 
3923 	sbuf_delete(buf);
3924 	return (error);
3925 }
3926 
3927 static int
3928 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3929 {
3930 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3931 	struct i40e_hw *hw = &pf->hw;
3932 	device_t dev = pf->dev;
3933 	enum i40e_status_code status;
3934 	struct i40e_aq_get_phy_abilities_resp abilities;
3935 	struct sbuf *buf;
3936 	int error = 0;
3937 
3938 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3939 	if (!buf) {
3940 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3941 		return (ENOMEM);
3942 	}
3943 
3944 	status = i40e_aq_get_phy_capabilities(hw,
3945 	    FALSE, FALSE, &abilities, NULL);
3946 	if (status) {
3947 		device_printf(dev,
3948 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3949 		    __func__, i40e_stat_str(hw, status),
3950 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3951 		sbuf_delete(buf);
3952 		return (EIO);
3953 	}
3954 
3955 	sbuf_printf(buf, "\n"
3956 	    "PHY Type : %08x",
3957 	    abilities.phy_type);
3958 
3959 	if (abilities.phy_type != 0) {
3960 		sbuf_printf(buf, "<");
3961 		for (int i = 0; i < 32; i++)
3962 			if ((1 << i) & abilities.phy_type)
3963 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3964 		sbuf_printf(buf, ">\n");
3965 	}
3966 
3967 	sbuf_printf(buf, "PHY Ext  : %02x",
3968 	    abilities.phy_type_ext);
3969 
3970 	if (abilities.phy_type_ext != 0) {
3971 		sbuf_printf(buf, "<");
3972 		for (int i = 0; i < 4; i++)
3973 			if ((1 << i) & abilities.phy_type_ext)
3974 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3975 		sbuf_printf(buf, ">");
3976 	}
3977 	sbuf_printf(buf, "\n");
3978 
3979 	sbuf_printf(buf,
3980 	    "Speed    : %02x\n"
3981 	    "Abilities: %02x\n"
3982 	    "EEE cap  : %04x\n"
3983 	    "EEER reg : %08x\n"
3984 	    "D3 Lpan  : %02x\n"
3985 	    "ID       : %02x %02x %02x %02x\n"
3986 	    "ModType  : %02x %02x %02x\n"
3987 	    "ModType E: %01x\n"
3988 	    "FEC Cfg  : %02x\n"
3989 	    "Ext CC   : %02x",
3990 	    abilities.link_speed,
3991 	    abilities.abilities, abilities.eee_capability,
3992 	    abilities.eeer_val, abilities.d3_lpan,
3993 	    abilities.phy_id[0], abilities.phy_id[1],
3994 	    abilities.phy_id[2], abilities.phy_id[3],
3995 	    abilities.module_type[0], abilities.module_type[1],
3996 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3997 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3998 	    abilities.ext_comp_code);
3999 
4000 	error = sbuf_finish(buf);
4001 	if (error)
4002 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4003 
4004 	sbuf_delete(buf);
4005 	return (error);
4006 }
4007 
4008 static int
4009 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4010 {
4011 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4012 	struct ixl_vsi *vsi = &pf->vsi;
4013 	struct ixl_mac_filter *f;
4014 	device_t dev = pf->dev;
4015 	int error = 0, ftl_len = 0, ftl_counter = 0;
4016 
4017 	struct sbuf *buf;
4018 
4019 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4020 	if (!buf) {
4021 		device_printf(dev, "Could not allocate sbuf for output.\n");
4022 		return (ENOMEM);
4023 	}
4024 
4025 	sbuf_printf(buf, "\n");
4026 
4027 	/* Print MAC filters */
4028 	sbuf_printf(buf, "PF Filters:\n");
4029 	SLIST_FOREACH(f, &vsi->ftl, next)
4030 		ftl_len++;
4031 
4032 	if (ftl_len < 1)
4033 		sbuf_printf(buf, "(none)\n");
4034 	else {
4035 		SLIST_FOREACH(f, &vsi->ftl, next) {
4036 			sbuf_printf(buf,
4037 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4038 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4039 			/* don't print '\n' for last entry */
4040 			if (++ftl_counter != ftl_len)
4041 				sbuf_printf(buf, "\n");
4042 		}
4043 	}
4044 
4045 #ifdef PCI_IOV
4046 	/* TODO: Give each VF its own filter list sysctl */
4047 	struct ixl_vf *vf;
4048 	if (pf->num_vfs > 0) {
4049 		sbuf_printf(buf, "\n\n");
4050 		for (int i = 0; i < pf->num_vfs; i++) {
4051 			vf = &pf->vfs[i];
4052 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4053 				continue;
4054 
4055 			vsi = &vf->vsi;
4056 			ftl_len = 0, ftl_counter = 0;
4057 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4058 			SLIST_FOREACH(f, &vsi->ftl, next)
4059 				ftl_len++;
4060 
4061 			if (ftl_len < 1)
4062 				sbuf_printf(buf, "(none)\n");
4063 			else {
4064 				SLIST_FOREACH(f, &vsi->ftl, next) {
4065 					sbuf_printf(buf,
4066 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4067 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4068 				}
4069 			}
4070 		}
4071 	}
4072 #endif
4073 
4074 	error = sbuf_finish(buf);
4075 	if (error)
4076 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4077 	sbuf_delete(buf);
4078 
4079 	return (error);
4080 }
4081 
4082 #define IXL_SW_RES_SIZE 0x14
4083 int
4084 ixl_res_alloc_cmp(const void *a, const void *b)
4085 {
4086 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4087 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4088 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4089 
4090 	return ((int)one->resource_type - (int)two->resource_type);
4091 }
4092 
4093 /*
4094  * Longest string length: 25
4095  */
4096 char *
4097 ixl_switch_res_type_string(u8 type)
4098 {
4099 	// TODO: This should be changed to static const
4100 	char * ixl_switch_res_type_strings[0x14] = {
4101 		"VEB",
4102 		"VSI",
4103 		"Perfect Match MAC address",
4104 		"S-tag",
4105 		"(Reserved)",
4106 		"Multicast hash entry",
4107 		"Unicast hash entry",
4108 		"VLAN",
4109 		"VSI List entry",
4110 		"(Reserved)",
4111 		"VLAN Statistic Pool",
4112 		"Mirror Rule",
4113 		"Queue Set",
4114 		"Inner VLAN Forward filter",
4115 		"(Reserved)",
4116 		"Inner MAC",
4117 		"IP",
4118 		"GRE/VN1 Key",
4119 		"VN2 Key",
4120 		"Tunneling Port"
4121 	};
4122 
4123 	if (type < 0x14)
4124 		return ixl_switch_res_type_strings[type];
4125 	else
4126 		return "(Reserved)";
4127 }
4128 
4129 static int
4130 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4131 {
4132 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4133 	struct i40e_hw *hw = &pf->hw;
4134 	device_t dev = pf->dev;
4135 	struct sbuf *buf;
4136 	enum i40e_status_code status;
4137 	int error = 0;
4138 
4139 	u8 num_entries;
4140 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4141 
4142 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4143 	if (!buf) {
4144 		device_printf(dev, "Could not allocate sbuf for output.\n");
4145 		return (ENOMEM);
4146 	}
4147 
4148 	bzero(resp, sizeof(resp));
4149 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4150 				resp,
4151 				IXL_SW_RES_SIZE,
4152 				NULL);
4153 	if (status) {
4154 		device_printf(dev,
4155 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4156 		    __func__, i40e_stat_str(hw, status),
4157 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4158 		sbuf_delete(buf);
4159 		return (EIO);
4160 	}
4161 
4162 	/* Sort entries by type for display */
4163 	qsort(resp, num_entries,
4164 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4165 	    &ixl_res_alloc_cmp);
4166 
4167 	sbuf_cat(buf, "\n");
4168 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4169 	sbuf_printf(buf,
4170 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4171 	    "                          | (this)     | (all) | (this) | (all)       \n");
4172 	for (int i = 0; i < num_entries; i++) {
4173 		sbuf_printf(buf,
4174 		    "%25s | %10d   %5d   %6d   %12d",
4175 		    ixl_switch_res_type_string(resp[i].resource_type),
4176 		    resp[i].guaranteed,
4177 		    resp[i].total,
4178 		    resp[i].used,
4179 		    resp[i].total_unalloced);
4180 		if (i < num_entries - 1)
4181 			sbuf_cat(buf, "\n");
4182 	}
4183 
4184 	error = sbuf_finish(buf);
4185 	if (error)
4186 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4187 
4188 	sbuf_delete(buf);
4189 	return (error);
4190 }
4191 
4192 /*
4193 ** Caller must init and delete sbuf; this function will clear and
4194 ** finish it for caller.
4195 */
4196 char *
4197 ixl_switch_element_string(struct sbuf *s,
4198     struct i40e_aqc_switch_config_element_resp *element)
4199 {
4200 	sbuf_clear(s);
4201 
4202 	switch (element->element_type) {
4203 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4204 		sbuf_printf(s, "MAC %3d", element->element_info);
4205 		break;
4206 	case I40E_AQ_SW_ELEM_TYPE_PF:
4207 		sbuf_printf(s, "PF  %3d", element->element_info);
4208 		break;
4209 	case I40E_AQ_SW_ELEM_TYPE_VF:
4210 		sbuf_printf(s, "VF  %3d", element->element_info);
4211 		break;
4212 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4213 		sbuf_cat(s, "EMP");
4214 		break;
4215 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4216 		sbuf_cat(s, "BMC");
4217 		break;
4218 	case I40E_AQ_SW_ELEM_TYPE_PV:
4219 		sbuf_cat(s, "PV");
4220 		break;
4221 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4222 		sbuf_cat(s, "VEB");
4223 		break;
4224 	case I40E_AQ_SW_ELEM_TYPE_PA:
4225 		sbuf_cat(s, "PA");
4226 		break;
4227 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4228 		sbuf_printf(s, "VSI %3d", element->element_info);
4229 		break;
4230 	default:
4231 		sbuf_cat(s, "?");
4232 		break;
4233 	}
4234 
4235 	sbuf_finish(s);
4236 	return sbuf_data(s);
4237 }
4238 
4239 static int
4240 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4241 {
4242 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4243 	struct i40e_hw *hw = &pf->hw;
4244 	device_t dev = pf->dev;
4245 	struct sbuf *buf;
4246 	struct sbuf *nmbuf;
4247 	enum i40e_status_code status;
4248 	int error = 0;
4249 	u16 next = 0;
4250 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4251 
4252 	struct i40e_aqc_get_switch_config_resp *sw_config;
4253 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4254 
4255 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4256 	if (!buf) {
4257 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4258 		return (ENOMEM);
4259 	}
4260 
4261 	status = i40e_aq_get_switch_config(hw, sw_config,
4262 	    sizeof(aq_buf), &next, NULL);
4263 	if (status) {
4264 		device_printf(dev,
4265 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4266 		    __func__, i40e_stat_str(hw, status),
4267 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4268 		sbuf_delete(buf);
4269 		return (EIO);
4270 	}
4271 	if (next)
4272 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4273 		    __func__, next);
4274 
4275 	nmbuf = sbuf_new_auto();
4276 	if (!nmbuf) {
4277 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4278 		sbuf_delete(buf);
4279 		return (ENOMEM);
4280 	}
4281 
4282 	sbuf_cat(buf, "\n");
4283 	/* Assuming <= 255 elements in switch */
4284 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4285 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4286 	/* Exclude:
4287 	** Revision -- all elements are revision 1 for now
4288 	*/
4289 	sbuf_printf(buf,
4290 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4291 	    "                |          |          | (uplink)\n");
4292 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4293 		// "%4d (%8s) | %8s   %8s   %#8x",
4294 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4295 		sbuf_cat(buf, " ");
4296 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4297 		    &sw_config->element[i]));
4298 		sbuf_cat(buf, " | ");
4299 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4300 		sbuf_cat(buf, "   ");
4301 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4302 		sbuf_cat(buf, "   ");
4303 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4304 		if (i < sw_config->header.num_reported - 1)
4305 			sbuf_cat(buf, "\n");
4306 	}
4307 	sbuf_delete(nmbuf);
4308 
4309 	error = sbuf_finish(buf);
4310 	if (error)
4311 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4312 
4313 	sbuf_delete(buf);
4314 
4315 	return (error);
4316 }
4317 
4318 static int
4319 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4320 {
4321 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4322 	struct i40e_hw *hw = &pf->hw;
4323 	device_t dev = pf->dev;
4324 	struct sbuf *buf;
4325 	int error = 0;
4326 	enum i40e_status_code status;
4327 	u32 reg;
4328 
4329 	struct i40e_aqc_get_set_rss_key_data key_data;
4330 
4331 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4332 	if (!buf) {
4333 		device_printf(dev, "Could not allocate sbuf for output.\n");
4334 		return (ENOMEM);
4335 	}
4336 
4337 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4338 
4339 	sbuf_cat(buf, "\n");
4340 	if (hw->mac.type == I40E_MAC_X722) {
4341 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4342 		if (status)
4343 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4344 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4345 	} else {
4346 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4347 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4348 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4349 		}
4350 	}
4351 
4352 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4353 
4354 	error = sbuf_finish(buf);
4355 	if (error)
4356 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4357 	sbuf_delete(buf);
4358 
4359 	return (error);
4360 }
4361 
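/*
 * Each dump line below is "%4d | " with the byte offset, then up to 16
 * hex bytes (short final lines are space-padded), then, when text is
 * set, the same bytes as ASCII with '.' substituted for non-printable
 * characters.
 */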
4362 static void
4363 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4364 {
4365 	int i, j, k, width;
4366 	char c;
4367 
4368 	if (length < 1 || buf == NULL) return;
4369 
4370 	int byte_stride = 16;
4371 	int lines = length / byte_stride;
4372 	int rem = length % byte_stride;
4373 	if (rem > 0)
4374 		lines++;
4375 
4376 	for (i = 0; i < lines; i++) {
4377 		width = (rem > 0 && i == lines - 1)
4378 		    ? rem : byte_stride;
4379 
4380 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4381 
4382 		for (j = 0; j < width; j++)
4383 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4384 
4385 		if (width < byte_stride) {
4386 			for (k = 0; k < (byte_stride - width); k++)
4387 				sbuf_printf(sb, "   ");
4388 		}
4389 
4390 		if (!text) {
4391 			sbuf_printf(sb, "\n");
4392 			continue;
4393 		}
4394 
4395 		for (j = 0; j < width; j++) {
4396 			c = (char)buf[i * byte_stride + j];
4397 			if (c < 32 || c > 126)
4398 				sbuf_printf(sb, ".");
4399 			else
4400 				sbuf_printf(sb, "%c", c);
4401 
4402 			if (j == width - 1)
4403 				sbuf_printf(sb, "\n");
4404 		}
4405 	}
4406 }
4407 
4408 static int
4409 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4410 {
4411 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4412 	struct i40e_hw *hw = &pf->hw;
4413 	device_t dev = pf->dev;
4414 	struct sbuf *buf;
4415 	int error = 0;
4416 	enum i40e_status_code status;
4417 	u8 hlut[512];
4418 	u32 reg;
4419 
4420 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4421 	if (!buf) {
4422 		device_printf(dev, "Could not allocate sbuf for output.\n");
4423 		return (ENOMEM);
4424 	}
4425 
4426 	bzero(hlut, sizeof(hlut));
4427 	sbuf_cat(buf, "\n");
4428 	if (hw->mac.type == I40E_MAC_X722) {
4429 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4430 		if (status)
4431 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4432 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4433 	} else {
4434 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4435 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4436 			bcopy(&reg, &hlut[i << 2], 4);
4437 		}
4438 	}
4439 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4440 
4441 	error = sbuf_finish(buf);
4442 	if (error)
4443 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4444 	sbuf_delete(buf);
4445 
4446 	return (error);
4447 }
4448 
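/*
 * Report the RSS hash-enable (HENA) bitmask.  The 64-bit value is split
 * across the two 32-bit I40E_PFQF_HENA registers; each set bit enables
 * RSS hashing for one packet classifier type.
 */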
4449 static int
4450 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4451 {
4452 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4453 	struct i40e_hw *hw = &pf->hw;
4454 	u64 hena;
4455 
4456 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4457 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4458 
4459 	return sysctl_handle_long(oidp, NULL, hena, req);
4460 }
4461 
4462 /*
4463  * Sysctl to disable firmware's link management
4464  *
4465  * 1 - Disable link management on this port
4466  * 0 - Re-enable link management
4467  *
4468  * On normal NVMs, firmware manages link by default.
4469  */
4470 static int
4471 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4472 {
4473 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4474 	struct i40e_hw *hw = &pf->hw;
4475 	device_t dev = pf->dev;
4476 	int requested_mode = -1;
4477 	enum i40e_status_code status = 0;
4478 	int error = 0;
4479 
4480 	/* Read in new mode */
4481 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4482 	if ((error) || (req->newptr == NULL))
4483 		return (error);
4484 	/* Check for sane value */
4485 	if (requested_mode < 0 || requested_mode > 1) {
4486 		device_printf(dev, "Valid modes are 0 or 1\n");
4487 		return (EINVAL);
4488 	}
4489 
4490 	/* Set new mode */
4491 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4492 	if (status) {
4493 		device_printf(dev,
4494 		    "%s: Error setting new phy debug mode %s,"
4495 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4496 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4497 		return (EIO);
4498 	}
4499 
4500 	return (0);
4501 }
4502 
4503 /*
4504  * Read some diagnostic data from an SFP module
4505  * Bytes 96-99, 102-105 from device address 0xA2
4506  */
4507 static int
4508 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4509 {
4510 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4511 	device_t dev = pf->dev;
4512 	struct sbuf *sbuf;
4513 	int error = 0;
4514 	u8 output;
4515 
4516 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4517 	if (error) {
4518 		device_printf(dev, "Error reading from i2c\n");
4519 		return (error);
4520 	}
4521 	if (output != 0x3) {
4522 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4523 		return (EIO);
4524 	}
4525 
4526 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4527 	if (!(output & 0x60)) {
4528 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4529 		return (EIO);
4530 	}
4531 
4532 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4533 
4534 	for (u8 offset = 96; offset < 100; offset++) {
4535 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4536 		sbuf_printf(sbuf, "%02X ", output);
4537 	}
4538 	for (u8 offset = 102; offset < 106; offset++) {
4539 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4540 		sbuf_printf(sbuf, "%02X ", output);
4541 	}
4542 
4543 	sbuf_finish(sbuf);
4544 	sbuf_delete(sbuf);
4545 
4546 	return (0);
4547 }
4548 
4549 /*
4550  * Sysctl to read a byte from I2C bus.
4551  *
4552  * Input: 32-bit value:
4553  * 	bits 0-7:   device address (0xA0 or 0xA2)
4554  * 	bits 8-15:  offset (0-255)
4555  *	bits 16-31: unused
4556  * Output: 8-bit value read
4557  */
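/*
 * Example encoding: reading offset 0x60 from device address 0xA2 uses
 * the input value (0x60 << 8) | 0xA2 == 0x60A2.
 */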
4558 static int
4559 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4560 {
4561 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4562 	device_t dev = pf->dev;
4563 	int input = -1, error = 0;
4564 	u8 dev_addr, offset, output;
4565 
4566 	/* Read in I2C read parameters */
4567 	error = sysctl_handle_int(oidp, &input, 0, req);
4568 	if ((error) || (req->newptr == NULL))
4569 		return (error);
4570 	/* Validate device address */
4571 	dev_addr = input & 0xFF;
4572 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4573 		return (EINVAL);
4574 	}
4575 	offset = (input >> 8) & 0xFF;
4576 
4577 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4578 	if (error)
4579 		return (error);
4580 
4581 	device_printf(dev, "%02X\n", output);
4582 	return (0);
4583 }
4584 
4585 /*
4586  * Sysctl to write a byte to the I2C bus.
4587  *
4588  * Input: 32-bit value:
4589  * 	bits 0-7:   device address (0xA0 or 0xA2)
4590  * 	bits 8-15:  offset (0-255)
4591  *	bits 16-23: value to write
4592  *	bits 24-31: unused
4593  * Output: 8-bit value written
4594  */
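/*
 * Example encoding: writing 0x5A to offset 0x7F of device address 0xA2
 * uses the input value (0x5A << 16) | (0x7F << 8) | 0xA2 == 0x5A7FA2.
 */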
4595 static int
4596 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4597 {
4598 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4599 	device_t dev = pf->dev;
4600 	int input = -1, error = 0;
4601 	u8 dev_addr, offset, value;
4602 
4603 	/* Read in I2C write parameters */
4604 	error = sysctl_handle_int(oidp, &input, 0, req);
4605 	if ((error) || (req->newptr == NULL))
4606 		return (error);
4607 	/* Validate device address */
4608 	dev_addr = input & 0xFF;
4609 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4610 		return (EINVAL);
4611 	}
4612 	offset = (input >> 8) & 0xFF;
4613 	value = (input >> 16) & 0xFF;
4614 
4615 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4616 	if (error)
4617 		return (error);
4618 
4619 	device_printf(dev, "%02X written\n", value);
4620 	return (0);
4621 }
4622 
4623 static int
4624 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4625     u8 bit_pos, int *is_set)
4626 {
4627 	device_t dev = pf->dev;
4628 	struct i40e_hw *hw = &pf->hw;
4629 	enum i40e_status_code status;
4630 
4631 	status = i40e_aq_get_phy_capabilities(hw,
4632 	    FALSE, FALSE, abilities, NULL);
4633 	if (status) {
4634 		device_printf(dev,
4635 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4636 		    __func__, i40e_stat_str(hw, status),
4637 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4638 		return (EIO);
4639 	}
4640 
4641 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4642 	return (0);
4643 }
4644 
4645 static int
4646 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4647     u8 bit_pos, int set)
4648 {
4649 	device_t dev = pf->dev;
4650 	struct i40e_hw *hw = &pf->hw;
4651 	struct i40e_aq_set_phy_config config;
4652 	enum i40e_status_code status;
4653 
4654 	/* Set new PHY config */
4655 	memset(&config, 0, sizeof(config));
4656 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4657 	if (set)
4658 		config.fec_config |= bit_pos;
4659 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4660 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4661 		config.phy_type = abilities->phy_type;
4662 		config.phy_type_ext = abilities->phy_type_ext;
4663 		config.link_speed = abilities->link_speed;
4664 		config.eee_capability = abilities->eee_capability;
4665 		config.eeer = abilities->eeer_val;
4666 		config.low_power_ctrl = abilities->d3_lpan;
4667 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4668 
4669 		if (status) {
4670 			device_printf(dev,
4671 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4672 			    __func__, i40e_stat_str(hw, status),
4673 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4674 			return (EIO);
4675 		}
4676 	}
4677 
4678 	return (0);
4679 }
4680 
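/*
 * The FEC sysctl handlers below share a read-modify-write pattern: read
 * the current bit with ixl_get_fec_config(), let sysctl_handle_int()
 * report or update it, then push any change through
 * ixl_set_fec_config(), which only issues the AQ command when the FEC
 * configuration actually changes.
 */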
4681 static int
4682 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4683 {
4684 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4685 	int mode, error = 0;
4686 
4687 	struct i40e_aq_get_phy_abilities_resp abilities;
4688 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4689 	if (error)
4690 		return (error);
4691 	/* Read in new mode */
4692 	error = sysctl_handle_int(oidp, &mode, 0, req);
4693 	if ((error) || (req->newptr == NULL))
4694 		return (error);
4695 
4696 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4697 }
4698 
4699 static int
4700 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4701 {
4702 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4703 	int mode, error = 0;
4704 
4705 	struct i40e_aq_get_phy_abilities_resp abilities;
4706 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4707 	if (error)
4708 		return (error);
4709 	/* Read in new mode */
4710 	error = sysctl_handle_int(oidp, &mode, 0, req);
4711 	if ((error) || (req->newptr == NULL))
4712 		return (error);
4713 
4714 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4715 }
4716 
4717 static int
4718 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4719 {
4720 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4721 	int mode, error = 0;
4722 
4723 	struct i40e_aq_get_phy_abilities_resp abilities;
4724 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4725 	if (error)
4726 		return (error);
4727 	/* Read in new mode */
4728 	error = sysctl_handle_int(oidp, &mode, 0, req);
4729 	if ((error) || (req->newptr == NULL))
4730 		return (error);
4731 
4732 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4733 }
4734 
4735 static int
4736 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4737 {
4738 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4739 	int mode, error = 0;
4740 
4741 	struct i40e_aq_get_phy_abilities_resp abilities;
4742 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4743 	if (error)
4744 		return (error);
4745 	/* Read in new mode */
4746 	error = sysctl_handle_int(oidp, &mode, 0, req);
4747 	if ((error) || (req->newptr == NULL))
4748 		return (error);
4749 
4750 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4751 }
4752 
4753 static int
4754 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4755 {
4756 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4757 	int mode, error = 0;
4758 
4759 	struct i40e_aq_get_phy_abilities_resp abilities;
4760 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4761 	if (error)
4762 		return (error);
4763 	/* Read in new mode */
4764 	error = sysctl_handle_int(oidp, &mode, 0, req);
4765 	if ((error) || (req->newptr == NULL))
4766 		return (error);
4767 
4768 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4769 }
4770 
4771 static int
4772 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4773 {
4774 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4775 	struct i40e_hw *hw = &pf->hw;
4776 	device_t dev = pf->dev;
4777 	struct sbuf *buf;
4778 	int error = 0;
4779 	enum i40e_status_code status;
4780 
4781 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4782 	if (!buf) {
4783 		device_printf(dev, "Could not allocate sbuf for output.\n");
4784 		return (ENOMEM);
4785 	}
4786 
4787 	u8 *final_buff;
4788 	/* This amount is only necessary if reading the entire cluster into memory */
4789 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4790 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4791 	if (final_buff == NULL) {
4792 		device_printf(dev, "Could not allocate memory for output.\n");
4793 		goto out;
4794 	}
4795 	int final_buff_len = 0;
4796 
4797 	u8 cluster_id = 1;
4798 	bool more = true;
4799 
4800 	u8 dump_buf[4096];
4801 	u16 curr_buff_size = 4096;
4802 	u8 curr_next_table = 0;
4803 	u32 curr_next_index = 0;
4804 
4805 	u16 ret_buff_size;
4806 	u8 ret_next_table;
4807 	u32 ret_next_index;
4808 
4809 	sbuf_cat(buf, "\n");
4810 
4811 	while (more) {
4812 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4813 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4814 		if (status) {
4815 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4816 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4817 			goto free_out;
4818 		}
4819 
4820 		/* copy info out of temp buffer */
4821 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4822 		final_buff_len += ret_buff_size;
4823 
4824 		if (ret_next_table != curr_next_table) {
4825 			/* We're done with the current table; we can dump out read data. */
4826 			sbuf_printf(buf, "%d:", curr_next_table);
4827 			int bytes_printed = 0;
4828 			while (bytes_printed < final_buff_len) {
4829 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4830 				bytes_printed += 16;
4831 			}
4832 			sbuf_cat(buf, "\n");
4833 
4834 			/* The entire cluster has been read; we're finished */
4835 			if (ret_next_table == 0xFF)
4836 				break;
4837 
4838 			/* Otherwise clear the output buffer and continue reading */
4839 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4840 			final_buff_len = 0;
4841 		}
4842 
4843 		if (ret_next_index == 0xFFFFFFFF)
4844 			ret_next_index = 0;
4845 
4846 		bzero(dump_buf, sizeof(dump_buf));
4847 		curr_next_table = ret_next_table;
4848 		curr_next_index = ret_next_index;
4849 	}
4850 
4851 free_out:
4852 	free(final_buff, M_DEVBUF);
4853 out:
4854 	error = sbuf_finish(buf);
4855 	if (error)
4856 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4857 	sbuf_delete(buf);
4858 
4859 	return (error);
4860 }
4861 
4862 static int
4863 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4864 {
4865 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4866 	struct i40e_hw *hw = &pf->hw;
4867 	device_t dev = pf->dev;
4868 	int error = 0;
4869 	int state, new_state;
4870 	enum i40e_status_code status;
4871 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4872 
4873 	/* Read in new mode */
4874 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4875 	if ((error) || (req->newptr == NULL))
4876 		return (error);
4877 
4878 	/* Already in requested state */
4879 	if (new_state == state)
4880 		return (error);
4881 
4882 	if (new_state == 0) {
4883 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4884 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4885 			return (EINVAL);
4886 		}
4887 
4888 		if (pf->hw.aq.api_maj_ver < 1 ||
4889 		    (pf->hw.aq.api_maj_ver == 1 &&
4890 		    pf->hw.aq.api_min_ver < 7)) {
4891 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4892 			return (EINVAL);
4893 		}
4894 
4895 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4896 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4897 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4898 	} else {
4899 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4900 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4901 			device_printf(dev, "FW LLDP agent is already running\n");
4902 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4903 	}
4904 
4905 	return (0);
4906 }
4907 
4908 /*
4909  * Get FW LLDP Agent status
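 *
 * Each port's AdminStatus occupies one nibble of lldp_cfg.adminstatus,
 * so port N's status is (adminstatus >> (N * 4)) & 0xf; a value of zero
 * means the agent is disabled on that port.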
4910  */
4911 int
4912 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4913 {
4914 	enum i40e_status_code ret = I40E_SUCCESS;
4915 	struct i40e_lldp_variables lldp_cfg;
4916 	struct i40e_hw *hw = &pf->hw;
4917 	u8 adminstatus = 0;
4918 
4919 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4920 	if (ret)
4921 		return ret;
4922 
4923 	/* Get the LLDP AdminStatus for the current port */
4924 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4925 	adminstatus &= 0xf;
4926 
4927 	/* Check if LLDP agent is disabled */
4928 	if (!adminstatus) {
4929 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4930 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4931 	} else
4932 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4933 
4934 	return (0);
4935 }
4936 
4937 int
4938 ixl_attach_get_link_status(struct ixl_pf *pf)
4939 {
4940 	struct i40e_hw *hw = &pf->hw;
4941 	device_t dev = pf->dev;
4942 	int error = 0;
4943 
4944 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4945 	    (hw->aq.fw_maj_ver < 4)) {
4946 		i40e_msec_delay(75);
4947 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4948 		if (error) {
4949 			device_printf(dev, "link restart failed, aq_err=%d\n",
4950 			    pf->hw.aq.asq_last_status);
4951 			return error;
4952 		}
4953 	}
4954 
4955 	/* Determine link state */
4956 	hw->phy.get_link_info = TRUE;
4957 	i40e_get_link_status(hw, &pf->link_up);
4958 	return (0);
4959 }
4960 
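/*
 * Debug reset sysctls: the PF reset below is only flagged here and
 * performed later by the admin task, while the core/global/EMP resets
 * write I40E_GLGEN_RTRIG directly and take effect immediately.
 */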
4961 static int
4962 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4963 {
4964 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4965 	int requested = 0, error = 0;
4966 
4967 	/* Read in new mode */
4968 	error = sysctl_handle_int(oidp, &requested, 0, req);
4969 	if ((error) || (req->newptr == NULL))
4970 		return (error);
4971 
4972 	/* Initiate the PF reset later in the admin task */
4973 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4974 
4975 	return (error);
4976 }
4977 
4978 static int
4979 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4980 {
4981 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4982 	struct i40e_hw *hw = &pf->hw;
4983 	int requested = 0, error = 0;
4984 
4985 	/* Read in new mode */
4986 	error = sysctl_handle_int(oidp, &requested, 0, req);
4987 	if ((error) || (req->newptr == NULL))
4988 		return (error);
4989 
4990 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4991 
4992 	return (error);
4993 }
4994 
4995 static int
4996 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4997 {
4998 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4999 	struct i40e_hw *hw = &pf->hw;
5000 	int requested = 0, error = 0;
5001 
5002 	/* Read in new mode */
5003 	error = sysctl_handle_int(oidp, &requested, 0, req);
5004 	if ((error) || (req->newptr == NULL))
5005 		return (error);
5006 
5007 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5008 
5009 	return (error);
5010 }
5011 
5012 static int
5013 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5014 {
5015 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5016 	struct i40e_hw *hw = &pf->hw;
5017 	int requested = 0, error = 0;
5018 
5019 	/* Read in new mode */
5020 	error = sysctl_handle_int(oidp, &requested, 0, req);
5021 	if ((error) || (req->newptr == NULL))
5022 		return (error);
5023 
5024 	/* TODO: Find out how to bypass this */
5025 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5026 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5027 		error = EINVAL;
5028 	} else
5029 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5030 
5031 	return (error);
5032 }
5033 
5034 /*
5035  * Print out the mapping of TX queue indexes and RX queue indexes
5036  * to MSI-X vectors.
5037  */
5038 static int
5039 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5040 {
5041 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5042 	struct ixl_vsi *vsi = &pf->vsi;
5043 	device_t dev = pf->dev;
5044 	struct sbuf *buf;
5045 	int error = 0;
5046 
5047 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5048 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5049 
5050 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5051 	if (!buf) {
5052 		device_printf(dev, "Could not allocate sbuf for output.\n");
5053 		return (ENOMEM);
5054 	}
5055 
5056 	sbuf_cat(buf, "\n");
5057 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5058 		rx_que = &vsi->rx_queues[i];
5059 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5060 	}
5061 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5062 		tx_que = &vsi->tx_queues[i];
5063 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5064 	}
5065 
5066 	error = sbuf_finish(buf);
5067 	if (error)
5068 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5069 	sbuf_delete(buf);
5070 
5071 	return (error);
5072 }
5073