/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static void	ixl_del_default_hw_filters(struct ixl_vsi *);

/* Sysctls */
static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
	va_list args;

	if (!(mask & pf->dbg_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
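
/*
 * Illustrative note (not from the original source): call sites in this file,
 * e.g. ixl_initialize_vsi() below, reach this function through the ixl_dbg()
 * convenience macro, assumed here to expand to ixl_debug_core() in ixl_pf.h.
 * A hypothetical direct call would look like:
 *
 *	ixl_debug_core(pf, IXL_DBG_SWITCH_INFO, "VSI seid: %d\n", vsi->seid);
 *
 * Nothing is printed unless IXL_DBG_SWITCH_INFO is set in pf->dbg_mask.
 */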

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

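/*
 * Example (hypothetical values, for illustration only): a unit with firmware
 * 6.0 build 48442, AQ API 1.7, NVM image 6.01, EEtrackID 0x80003554 and OEM
 * version fields 1/263/0 would be rendered by the format string above as:
 *
 *	"fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.263.0"
 */
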
void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring 	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

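	/*
	 * Sketch of the access-method selection below (the value meanings
	 * are assumed to match the pf->i2c_access_method tunable and are not
	 * confirmed by this file alone):
	 *   0 - pick the best available method: the AQ command on XL710
	 *       with AQ API 1.7+, raw register access otherwise
	 *   1 - bit-bang the I2C lines directly
	 *   2 - use the I2CCMD-style register interface
	 *   3 - use the Admin Queue command
	 */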
	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case 0: {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == 1 &&
		    hw->aq.api_min_ver >= 7) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case 3:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

#if 0
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		     error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
425 		    " aq_err %d\n", error, hw->aq.asq_last_status);
426 		error = EIO;
427 		goto err_out;
428 	}
429 
430 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
431 	if (error) {
432 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
433 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
434 		goto err_out;
435 	}
436 
437 	// XXX: (Rebuild VSIs?)
438 
439 	/* Firmware delay workaround */
440 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
441 	    (hw->aq.fw_maj_ver < 4)) {
442 		i40e_msec_delay(75);
443 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
444 		if (error) {
445 			device_printf(dev, "init: link restart failed, aq_err %d\n",
446 			    hw->aq.asq_last_status);
447 			goto err_out;
448 		}
449 	}
450 
451 
452 	/* Re-enable admin queue interrupt */
453 	if (pf->msix > 1) {
454 		ixl_configure_intr0_msix(pf);
455 		ixl_enable_intr0(hw);
456 	}
457 
458 err_out:
459 	return (error);
460 #endif
461 	// TODO: Fix second parameter
462 	ixl_rebuild_hw_structs_after_reset(pf, false);
463 
464 	/* The PF reset should have cleared any critical errors */
465 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
466 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
467 
468 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
469 	reg |= IXL_ICR0_CRIT_ERR_MASK;
470 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
471 
472  err_out:
473  	return (error);
474 }
475 
476 /*
477  * TODO: Make sure this properly handles admin queue / single rx queue intr
478  */
479 int
480 ixl_intr(void *arg)
481 {
482 	struct ixl_pf		*pf = arg;
483 	struct i40e_hw		*hw =  &pf->hw;
484 	struct ixl_vsi		*vsi = &pf->vsi;
485 	struct ixl_rx_queue	*que = vsi->rx_queues;
486         u32			icr0;
487 
488 	// pf->admin_irq++
489 	++que->irqs;
490 
491 // TODO: Check against proper field
492 #if 0
493 	/* Clear PBA at start of ISR if using legacy interrupts */
494 	if (pf->msix == 0)
495 		wr32(hw, I40E_PFINT_DYN_CTL0,
496 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
497 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
498 #endif
499 
500 	icr0 = rd32(hw, I40E_PFINT_ICR0);
501 
502 
503 #ifdef PCI_IOV
504 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
505 		iflib_iov_intr_deferred(vsi->ctx);
506 #endif
507 
508 	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
509 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
510 		iflib_admin_intr_deferred(vsi->ctx);
511 
512 	// TODO: Is intr0 enabled somewhere else?
513 	ixl_enable_intr0(hw);
514 
515 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
516 		return (FILTER_SCHEDULE_THREAD);
517 	else
518 		return (FILTER_HANDLED);
519 }
520 
521 
522 /*********************************************************************
523  *
524  *  MSIX VSI Interrupt Service routine
525  *
526  **********************************************************************/
527 int
528 ixl_msix_que(void *arg)
529 {
530 	struct ixl_rx_queue *que = arg;
531 
532 	++que->irqs;
533 
534 	ixl_set_queue_rx_itr(que);
535 	// ixl_set_queue_tx_itr(que);
536 
537 	return (FILTER_SCHEDULE_THREAD);
538 }
539 
540 
541 /*********************************************************************
542  *
543  *  MSIX Admin Queue Interrupt Service routine
544  *
545  **********************************************************************/
546 int
547 ixl_msix_adminq(void *arg)
548 {
549 	struct ixl_pf	*pf = arg;
550 	struct i40e_hw	*hw = &pf->hw;
551 	device_t	dev = pf->dev;
552 	u32		reg, mask, rstat_reg;
553 	bool		do_task = FALSE;
554 
555 	DDPRINTF(dev, "begin");
556 
557 	++pf->admin_irq;
558 
559 	reg = rd32(hw, I40E_PFINT_ICR0);
560 	// For masking off interrupt causes that need to be handled before
561 	// they can be re-enabled
562 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
563 
564 	/* Check on the cause */
565 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
566 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
567 		do_task = TRUE;
568 	}
569 
570 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
571 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
572 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
573 		do_task = TRUE;
574 	}
575 
576 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
577 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
578 		device_printf(dev, "Reset Requested!\n");
579 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
580 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
581 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
582 		device_printf(dev, "Reset type: ");
583 		switch (rstat_reg) {
584 		/* These others might be handled similarly to an EMPR reset */
585 		case I40E_RESET_CORER:
586 			printf("CORER\n");
587 			break;
588 		case I40E_RESET_GLOBR:
589 			printf("GLOBR\n");
590 			break;
591 		case I40E_RESET_EMPR:
592 			printf("EMPR\n");
593 			break;
594 		default:
595 			printf("POR\n");
596 			break;
597 		}
598 		/* overload admin queue task to check reset progress */
599 		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
600 		do_task = TRUE;
601 	}
602 
603 	/*
604 	 * PE / PCI / ECC exceptions are all handled in the same way:
605 	 * mask out these three causes, then request a PF reset
606 	 *
607 	 * TODO: I think at least ECC error requires a GLOBR, not PFR
608 	 */
609 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
610  		device_printf(dev, "ECC Error detected!\n");
611 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
612 		device_printf(dev, "PCI Exception detected!\n");
613 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
614 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
615 	/* Checks against the conditions above */
616 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
617 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
618 		atomic_set_32(&pf->state,
619 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
620 		do_task = TRUE;
621 	}
622 
623 	// TODO: Linux driver never re-enables this interrupt once it has been detected
624 	// Then what is supposed to happen? A PF reset? Should it never happen?
625 	// TODO: Parse out this error into something human readable
626 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
627 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
628 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
629 			device_printf(dev, "HMC Error detected!\n");
630 			device_printf(dev, "INFO 0x%08x\n", reg);
631 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
632 			device_printf(dev, "DATA 0x%08x\n", reg);
633 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
634 		}
635 	}
636 
637 #ifdef PCI_IOV
638 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
639 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
640 		atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
641 		do_task = TRUE;
642 	}
643 #endif
644 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
645 
646 	if (do_task)
647 		return (FILTER_SCHEDULE_THREAD);
648 	else
649 		return (FILTER_HANDLED);
650 }
651 
652 /*********************************************************************
653  * 	Filter Routines
654  *
655  *	Routines for multicast and vlan filter management.
656  *
657  *********************************************************************/
658 void
659 ixl_add_multi(struct ixl_vsi *vsi)
660 {
661 	struct	ifmultiaddr	*ifma;
662 	struct ifnet		*ifp = vsi->ifp;
663 	struct i40e_hw		*hw = vsi->hw;
664 	int			mcnt = 0, flags;
665 
666 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
667 
668 	if_maddr_rlock(ifp);
669 	/*
	** First just get a count, to decide if we
	** should simply use multicast promiscuous.
	*/
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}

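/*
 * Example of the resulting log line (hypothetical link state, for
 * illustration; assumes ixl_aq_speed_to_str() returns strings like
 * "10 Gbps"): a 10G link with no FEC and full flow control would log,
 * on one line:
 *
 *	ixl0: Link is up, 10 Gbps Full Duplex, Requested FEC: None,
 *	    Negotiated FEC: None, Autoneg: True, Flow Control: Full
 */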
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSIX mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
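/*
 * Worked example derived from the loop below: with two queue pairs, the
 * MSI-X vectors map as
 *
 *	vector 1: LNKLSTN(0) -> RQCTL(0) -> TQCTL(0) -> EOL
 *	vector 2: LNKLSTN(1) -> RQCTL(1) -> TQCTL(1) -> EOL
 *
 * i.e. queue pair i is serviced by vector i + 1, since vector 0 is
 * reserved for the admin queue (see ixl_configure_intr0_msix() above).
 */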
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	// TODO: See if max is really necessary
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

// TODO: Fix
#if 0
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;
	/* XXX: Assuming only 1 queue in single interrupt mode */
#endif
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are setup */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all msix VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);
}

void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	uint64_t cap;

	INIT_DBG_DEV(dev, "begin");

	/* initialize fast path functions */
	cap = IXL_CAPS;
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, cap, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/* TODO: Remove VLAN_ENCAP_LEN? */
	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
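	/*
	 * e.g. with the default 1500-byte MTU this works out to
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */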

	/*
	** Don't turn this on by default; if vlans are
	** created on another pseudo device (e.g. lagg)
	** then vlan events are not passed through, breaking
	** operation. With HW FILTER off it works. If you are
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		/* TODO: Maybe just retry this in a task... */
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t 	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  			 like the number of descriptors, buffer size,
 *			 plus we init the rings thru this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int 			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
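	/*
	 * For example (illustrative): if 8 queues were allocated to the PF,
	 * fls(8) - 1 == 3, so traffic class 0 is assigned 2^3 = 8 queues.
	 */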
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	// TODO: Call function to get this cap bit, instead
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context  */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. The index into the array is the traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring 		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context  */
		if (scctx->isc_max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

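		/*
		 * Worked example of the smoothing below (ITR constants are
		 * assumed to be in 2-usec register units, e.g. IXL_ITR_8K ==
		 * 0x3E == 62, as noted in ixl_configure_intr0_msix() above):
		 * moving from a current itr of 62 toward a target of 5 gives
		 * (10 * 5 * 62) / (9 * 5 + 62) = 3100 / 107 ~= 28, while
		 * moving from 5 toward 62 gives
		 * (10 * 62 * 5) / (9 * 62 + 5) = 3100 / 563 ~= 5 after
		 * integer truncation; the smoothed value drops quickly
		 * toward lower-latency targets but climbs back only slowly.
		 */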
		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}


/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;


	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

void
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
    struct sysctl_ctx_list *ctx, const char *sysctl_name)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(pf->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
				   CTLFLAG_RD, NULL, "VSI Number");
	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}

#ifdef IXL_DEBUG
/**
 * ixl_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_tx_queue *tx_que;
	int error;
	u32 val;

	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
	if (!tx_que) return 0;

	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return (0);
}

/**
 * ixl_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_rx_queue *rx_que;
	int error;
	u32 val;

	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
	if (!rx_que) return 0;

	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return (0);
}
#endif

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
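/*
 * Hypothetical usage sketch (the sysctl name depends on how this handler is
 * registered elsewhere in the driver; "tx_itr" is assumed here):
 *
 *	# sysctl dev.ixl.0.tx_itr=62
 *
 * The write fails with EINVAL if dynamic TX ITR is enabled or if the value
 * is outside [0, IXL_MAX_ITR].
 */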
1581 static int
1582 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1583 {
1584 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1585 	device_t dev = pf->dev;
1586 	int error = 0;
1587 	int requested_tx_itr;
1588 
1589 	requested_tx_itr = pf->tx_itr;
1590 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1591 	if ((error) || (req->newptr == NULL))
1592 		return (error);
1593 	if (pf->dynamic_tx_itr) {
1594 		device_printf(dev,
1595 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1596 		    return (EINVAL);
1597 	}
1598 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1599 		device_printf(dev,
1600 		    "Invalid TX itr value; value must be between 0 and %d\n",
1601 		        IXL_MAX_ITR);
1602 		return (EINVAL);
1603 	}
1604 
1605 	pf->tx_itr = requested_tx_itr;
1606 	ixl_configure_tx_itr(pf);
1607 
1608 	return (error);
1609 }
1610 
1611 /*
1612  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1613  * Writes to the ITR registers immediately.
1614  */
1615 static int
1616 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1617 {
1618 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1619 	device_t dev = pf->dev;
1620 	int error = 0;
1621 	int requested_rx_itr;
1622 
1623 	requested_rx_itr = pf->rx_itr;
1624 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1625 	if ((error) || (req->newptr == NULL))
1626 		return (error);
1627 	if (pf->dynamic_rx_itr) {
1628 		device_printf(dev,
1629 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1630 		    return (EINVAL);
1631 	}
1632 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1633 		device_printf(dev,
1634 		    "Invalid RX itr value; value must be between 0 and %d\n",
1635 		        IXL_MAX_ITR);
1636 		return (EINVAL);
1637 	}
1638 
1639 	pf->rx_itr = requested_rx_itr;
1640 	ixl_configure_rx_itr(pf);
1641 
1642 	return (error);
1643 }
1644 
1645 void
1646 ixl_add_hw_stats(struct ixl_pf *pf)
1647 {
1648 	struct ixl_vsi *vsi = &pf->vsi;
1649 	device_t dev = iflib_get_dev(vsi->ctx);
1650 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1651 
1652 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1653 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1654 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1655 	struct sysctl_oid_list *vsi_list, *queue_list;
1656 	struct sysctl_oid *queue_node;
1657 	char queue_namebuf[32];
1658 
1659 	struct ixl_rx_queue *rx_que;
1660 	struct ixl_tx_queue *tx_que;
1661 	struct tx_ring *txr;
1662 	struct rx_ring *rxr;
1663 
1664 	/* Driver statistics */
1665 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
1666 			CTLFLAG_RD, &pf->watchdog_events,
1667 			"Watchdog timeouts");
1668 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1669 			CTLFLAG_RD, &pf->admin_irq,
1670 			"Admin Queue IRQ Handled");
1671 
1672 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
1673 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
1674 
1675 	/* Queue statistics */
1676 	for (int q = 0; q < vsi->num_rx_queues; q++) {
1677 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
1678 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1679 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
1680 		queue_list = SYSCTL_CHILDREN(queue_node);
1681 
1682 		rx_que = &(vsi->rx_queues[q]);
1683 		rxr = &(rx_que->rxr);
1684 
1685 
1686 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1687 				CTLFLAG_RD, &(rx_que->irqs),
1688 				"irqs on this queue (both Tx and Rx)");
1689 
1690 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1691 				CTLFLAG_RD, &(rxr->rx_packets),
1692 				"Queue Packets Received");
1693 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1694 				CTLFLAG_RD, &(rxr->rx_bytes),
1695 				"Queue Bytes Received");
1696 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
1697 				CTLFLAG_RD, &(rxr->desc_errs),
1698 				"Queue Rx Descriptor Errors");
1699 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1700 				CTLFLAG_RD, &(rxr->itr), 0,
1701 				"Queue Rx ITR Interval");
1702 #ifdef IXL_DEBUG
1703 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
1704 				CTLTYPE_UINT | CTLFLAG_RD, rx_que,
1705 				sizeof(struct ixl_rx_queue),
1706 				ixl_sysctl_qrx_tail_handler, "IU",
1707 				"Queue Receive Descriptor Tail");
1708 #endif
1709 	}
1710 	for (int q = 0; q < vsi->num_tx_queues; q++) {
1711 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
1712 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1713 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
1714 		queue_list = SYSCTL_CHILDREN(queue_node);
1715 
1716 		tx_que = &(vsi->tx_queues[q]);
1717 		txr = &(tx_que->txr);
1718 
1719 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
1720 				CTLFLAG_RD, &(tx_que->tso),
1721 				"TSO");
1722 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
1723 				CTLFLAG_RD, &(txr->mss_too_small),
1724 				"TSO sends with an MSS less than 64");
1725 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1726 				CTLFLAG_RD, &(txr->tx_packets),
1727 				"Queue Packets Transmitted");
1728 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1729 				CTLFLAG_RD, &(txr->tx_bytes),
1730 				"Queue Bytes Transmitted");
1731 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1732 				CTLFLAG_RD, &(txr->itr), 0,
1733 				"Queue Tx ITR Interval");
1734 #ifdef IXL_DEBUG
1735 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
1736 				CTLTYPE_UINT | CTLFLAG_RD, tx_que,
1737 				sizeof(struct ixl_tx_queue),
1738 				ixl_sysctl_qtx_tail_handler, "IU",
1739 				"Queue Transmit Descriptor Tail");
1740 #endif
1741 	}
1742 
1743 	/* MAC stats */
1744 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1745 }
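/*
 * Illustrative layout of the sysctl tree built above, assuming a
 * hypothetical unit 0 with two queue pairs:
 *
 *   dev.ixl.0.watchdog_events
 *   dev.ixl.0.admin_irq
 *   dev.ixl.0.pf.rxq00.{irqs,packets,bytes,desc_err,itr}
 *   dev.ixl.0.pf.txq00.{tso,mss_too_small,packets,bytes,itr}
 *   dev.ixl.0.pf.rxq01...  dev.ixl.0.pf.txq01...
 *   dev.ixl.0.mac.*        (port-level stats, added last)
 *
 * The "pf" node is created by the ixl_add_vsi_sysctls() call above.
 */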
1746 
1747 void
1748 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
1749 	struct sysctl_oid_list *child,
1750 	struct i40e_eth_stats *eth_stats)
1751 {
1752 	struct ixl_sysctl_info ctls[] =
1753 	{
1754 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
1755 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
1756 			"Unicast Packets Received"},
1757 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
1758 			"Multicast Packets Received"},
1759 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
1760 			"Broadcast Packets Received"},
1761 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
1762 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
1763 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
1764 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
1765 			"Multicast Packets Transmitted"},
1766 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
1767 			"Broadcast Packets Transmitted"},
1768 		// end
1769 		{0,0,0}
1770 	};
1771 
1772 	struct ixl_sysctl_info *entry = ctls;
1773 	while (entry->stat != 0)
1774 	{
1775 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
1776 				CTLFLAG_RD, entry->stat,
1777 				entry->description);
1778 		entry++;
1779 	}
1780 }
1781 
1782 void
1783 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1784 	struct sysctl_oid_list *child,
1785 	struct i40e_hw_port_stats *stats)
1786 {
1787 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1788 				    CTLFLAG_RD, NULL, "Mac Statistics");
1789 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1790 
1791 	struct i40e_eth_stats *eth_stats = &stats->eth;
1792 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1793 
1794 	struct ixl_sysctl_info ctls[] =
1795 	{
1796 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1797 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1798 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1799 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1800 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1801 		/* Packet Reception Stats */
1802 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1803 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1804 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1805 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1806 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1807 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1808 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1809 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1810 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1811 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1812 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1813 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1814 		/* Packet Transmission Stats */
1815 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1816 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1817 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1818 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1819 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1820 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1821 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1822 		/* Flow control */
1823 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1824 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1825 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1826 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1827 		/* End */
1828 		{0,0,0}
1829 	};
1830 
1831 	struct ixl_sysctl_info *entry = ctls;
1832 	while (entry->stat != 0)
1833 	{
1834 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1835 				CTLFLAG_RD, entry->stat,
1836 				entry->description);
1837 		entry++;
1838 	}
1839 }
1840 
1841 void
1842 ixl_set_rss_key(struct ixl_pf *pf)
1843 {
1844 	struct i40e_hw *hw = &pf->hw;
1845 	struct ixl_vsi *vsi = &pf->vsi;
1846 	device_t	dev = pf->dev;
1847 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1848 	enum i40e_status_code status;
1849 
1850 #ifdef RSS
1851         /* Fetch the configured RSS key */
1852         rss_getkey((uint8_t *) &rss_seed);
1853 #else
1854 	ixl_get_default_rss_key(rss_seed);
1855 #endif
1856 	/* Fill out hash function seed */
1857 	if (hw->mac.type == I40E_MAC_X722) {
1858 		struct i40e_aqc_get_set_rss_key_data key_data;
1859 		bcopy(rss_seed, &key_data, 52);	/* 52 == sizeof(key_data) */
1860 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1861 		if (status)
1862 			device_printf(dev,
1863 			    "i40e_aq_set_rss_key status %s, error %s\n",
1864 			    i40e_stat_str(hw, status),
1865 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1866 	} else {
1867 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1868 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1869 	}
1870 }
1871 
1872 /*
1873  * Configure enabled PCTYPES for RSS.
1874  */
1875 void
1876 ixl_set_rss_pctypes(struct ixl_pf *pf)
1877 {
1878 	struct i40e_hw *hw = &pf->hw;
1879 	u64		set_hena = 0, hena;
1880 
1881 #ifdef RSS
1882 	u32		rss_hash_config;
1883 
1884 	rss_hash_config = rss_gethashconfig();
1885 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1886 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1887 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1888 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1889 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1890 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1891 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1892 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1893 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1894 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1895 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1896 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1897 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1898 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1899 #else
1900 	if (hw->mac.type == I40E_MAC_X722)
1901 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1902 	else
1903 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1904 #endif
1905 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1906 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1907 	hena |= set_hena;
1908 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1909 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1911 }
1912 
1913 void
1914 ixl_set_rss_hlut(struct ixl_pf *pf)
1915 {
1916 	struct i40e_hw	*hw = &pf->hw;
1917 	struct ixl_vsi *vsi = &pf->vsi;
1918 	device_t	dev = iflib_get_dev(vsi->ctx);
1919 	int		i, que_id;
1920 	int		lut_entry_width;
1921 	u32		lut = 0;
1922 	enum i40e_status_code status;
1923 
1924 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1925 
1926 	/* Populate the LUT with the max number of queues in round-robin fashion */
1927 	u8 hlut_buf[512];
1928 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1929 #ifdef RSS
1930 		/*
1931 		 * Fetch the RSS bucket id for the given indirection entry.
1932 		 * Cap it at the number of configured buckets (which is
1933 		 * num_queues.)
1934 		 */
1935 		que_id = rss_get_indirection_to_bucket(i);
1936 		que_id = que_id % vsi->num_rx_queues;
1937 #else
1938 		que_id = i % vsi->num_rx_queues;
1939 #endif
1940 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1941 		hlut_buf[i] = lut;
1942 	}
1943 
1944 	if (hw->mac.type == I40E_MAC_X722) {
1945 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1946 		if (status)
1947 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1948 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1949 	} else {
1950 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1951 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1952 		ixl_flush(hw);
1953 	}
1954 }
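/*
 * Worked example of the LUT fill above: with vsi->num_rx_queues == 4
 * and a 512-entry table (the non-RSS path), hlut_buf becomes
 * 0,1,2,3,0,1,2,3,... so hashed flows spread across the four queues in
 * round-robin order; lut_entry_width only masks each entry down to the
 * width the hardware reports.
 */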
1955 
1956 /*
1957 ** Setup the PF's RSS parameters.
1958 */
1959 void
1960 ixl_config_rss(struct ixl_pf *pf)
1961 {
1962 	ixl_set_rss_key(pf);
1963 	ixl_set_rss_pctypes(pf);
1964 	ixl_set_rss_hlut(pf);
1965 }
1966 
1967 /*
1968 ** This routine updates vlan filters, called by init;
1969 ** it scans the filter table and then updates the hw
1970 ** after a soft reset.
1971 */
1972 void
1973 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1974 {
1975 	struct ixl_mac_filter	*f;
1976 	int			cnt = 0, flags;
1977 
1978 	if (vsi->num_vlans == 0)
1979 		return;
1980 	/*
1981 	** Scan the filter list for vlan entries,
1982 	** mark them for addition and then call
1983 	** for the AQ update.
1984 	*/
1985 	SLIST_FOREACH(f, &vsi->ftl, next) {
1986 		if (f->flags & IXL_FILTER_VLAN) {
1987 			f->flags |=
1988 			    (IXL_FILTER_ADD |
1989 			    IXL_FILTER_USED);
1990 			cnt++;
1991 		}
1992 	}
1993 	if (cnt == 0) {
1994 		printf("setup vlan: no filters found!\n");
1995 		return;
1996 	}
1997 	flags = IXL_FILTER_VLAN;
1998 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1999 	ixl_add_hw_filters(vsi, flags, cnt);
2000 }
2001 
2002 /*
2003  * In some firmware versions there is a default MAC/VLAN filter
2004  * configured which interferes with the filters managed by the driver.
2005  * Make sure it's removed.
2006  */
2007 static void
2008 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
2009 {
2010 	struct i40e_aqc_remove_macvlan_element_data e;
2011 
2012 	bzero(&e, sizeof(e));
2013 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
2014 	e.vlan_tag = 0;
2015 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2016 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
2017 
2018 	bzero(&e, sizeof(e));
2019 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
2020 	e.vlan_tag = 0;
2021 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2022 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2023 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
2024 }
2025 
2026 /*
2027 ** Initialize filter list and add filters that the hardware
2028 ** needs to know about.
2029 **
2030 ** Requires VSI's filter list & seid to be set before calling.
2031 */
2032 void
2033 ixl_init_filters(struct ixl_vsi *vsi)
2034 {
2035 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2036 
2037 	/* Initialize mac filter list for VSI */
2038 	SLIST_INIT(&vsi->ftl);
2039 
2040 	/* Receive broadcast Ethernet frames */
2041 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
2042 
2043 	ixl_del_default_hw_filters(vsi);
2044 
2045 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
2046 	/*
2047 	 * Prevent Tx flow control frames from being sent out by
2048 	 * non-firmware transmitters.
2049 	 * This affects every VSI in the PF.
2050 	 */
2051 	if (pf->enable_tx_fc_filter)
2052 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
2053 }
2054 
2055 /*
2056 ** This routine adds multicast filters
2057 */
2058 void
2059 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
2060 {
2061 	struct ixl_mac_filter *f;
2062 
2063 	/* Does one already exist? */
2064 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
2065 	if (f != NULL)
2066 		return;
2067 
2068 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
2069 	if (f != NULL)
2070 		f->flags |= IXL_FILTER_MC;
2071 	else
2072 		printf("WARNING: no filter available!!\n");
2073 
2074 	return;
2075 }
2076 
2077 void
2078 ixl_reconfigure_filters(struct ixl_vsi *vsi)
2079 {
2080 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
2081 }
2082 
2083 /*
2084 ** This routine adds macvlan filters
2085 */
2086 void
2087 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2088 {
2089 	struct ixl_mac_filter	*f, *tmp;
2090 	struct ixl_pf		*pf;
2091 	device_t		dev;
2092 
2093 	DEBUGOUT("ixl_add_filter: begin");
2094 
2095 	pf = vsi->back;
2096 	dev = pf->dev;
2097 
2098 	/* Does one already exist? */
2099 	f = ixl_find_filter(vsi, macaddr, vlan);
2100 	if (f != NULL)
2101 		return;
2102 	/*
2103 	** If this is the first vlan being registered, we need
2104 	** to remove the ANY filter that indicates we are not
2105 	** in a vlan, and replace it with a 0 filter.
2106 	*/
2107 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
2108 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
2109 		if (tmp != NULL) {
2110 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
2111 			ixl_add_filter(vsi, macaddr, 0);
2112 		}
2113 	}
2114 
2115 	f = ixl_new_filter(vsi, macaddr, vlan);
2116 	if (f == NULL) {
2117 		device_printf(dev, "WARNING: no filter available!!\n");
2118 		return;
2119 	}
2120 	if (f->vlan != IXL_VLAN_ANY)
2121 		f->flags |= IXL_FILTER_VLAN;
2122 	else
2123 		vsi->num_macs++;
2124 
2125 	ixl_add_hw_filters(vsi, f->flags, 1);
2126 	return;
2127 }
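/*
 * Worked example of the VLAN_ANY swap above: suppose MAC M has only
 * the (M, IXL_VLAN_ANY) filter and the first vlan, say 100, is
 * registered. The code deletes (M, ANY), adds (M, 0) so untagged
 * traffic keeps flowing, and then installs (M, 100) itself;
 * ixl_del_filter() below performs the reverse swap when the last vlan
 * goes away.
 */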
2128 
2129 void
2130 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2131 {
2132 	struct ixl_mac_filter *f;
2133 
2134 	f = ixl_find_filter(vsi, macaddr, vlan);
2135 	if (f == NULL)
2136 		return;
2137 
	/*
	 * ixl_del_hw_filters() unlinks and frees entries flagged
	 * IXL_FILTER_DEL, so f must not be dereferenced after that
	 * call; evaluate the accounting condition first.
	 */
	bool dec_num_macs = (f->vlan == IXL_VLAN_ANY &&
	    (f->flags & IXL_FILTER_VLAN) != 0);

2138 	f->flags |= IXL_FILTER_DEL;
2139 	ixl_del_hw_filters(vsi, 1);
2140 	if (dec_num_macs)
2141 		vsi->num_macs--;
2142 
2143 	/* Check if this is the last vlan removal */
2144 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2145 		/* Switch back to a non-vlan filter */
2146 		ixl_del_filter(vsi, macaddr, 0);
2147 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2148 	}
2149 	return;
2150 }
2151 
2152 /*
2153 ** Find the filter with both matching mac addr and vlan id
2154 */
2155 struct ixl_mac_filter *
2156 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2157 {
2158 	struct ixl_mac_filter	*f;
2159 
2160 	SLIST_FOREACH(f, &vsi->ftl, next) {
		/* cmp_etheraddr() returns nonzero when the addresses match */
2161 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2162 		    && (f->vlan == vlan)) {
2163 			return (f);
2164 		}
2165 	}
2166 
2167 	return (NULL);
2168 }
2169 
2170 /*
2171 ** This routine takes additions to the vsi filter
2172 ** table and creates an Admin Queue call to create
2173 ** the filters in the hardware.
2174 */
2175 void
2176 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2177 {
2178 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2179 	struct ixl_mac_filter	*f;
2180 	struct ixl_pf		*pf;
2181 	struct i40e_hw		*hw;
2182 	device_t		dev;
2183 	enum i40e_status_code	status;
2184 	int			j = 0;
2185 
2186 	MPASS(cnt > 0);
2187 
2188 	pf = vsi->back;
2189 	dev = iflib_get_dev(vsi->ctx);
2190 	hw = &pf->hw;
2191 
2192 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2193 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2194 	if (a == NULL) {
2195 		device_printf(dev, "add_hw_filters failed to get memory\n");
2196 		return;
2197 	}
2198 
2199 	/*
2200 	** Scan the filter list, each time we find one
2201 	** we add it to the admin queue array and turn off
2202 	** the add bit.
2203 	*/
2204 	SLIST_FOREACH(f, &vsi->ftl, next) {
2205 		if ((f->flags & flags) == flags) {
2206 			b = &a[j]; // a pox on fvl long names :)
2207 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2208 			if (f->vlan == IXL_VLAN_ANY) {
2209 				b->vlan_tag = 0;
2210 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2211 			} else {
2212 				b->vlan_tag = f->vlan;
2213 				b->flags = 0;
2214 			}
2215 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2216 			f->flags &= ~IXL_FILTER_ADD;
2217 			j++;
2218 		}
2219 		if (j == cnt)
2220 			break;
2221 	}
2222 	if (j > 0) {
2223 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2224 		if (status)
2225 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2226 			    "error %s\n", i40e_stat_str(hw, status),
2227 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2228 		else
2229 			vsi->hw_filters_add += j;
2230 	}
2231 	free(a, M_DEVBUF);
2232 	return;
2233 }
2234 
2235 /*
2236 ** This routine takes removals in the vsi filter
2237 ** table and creates an Admin Queue call to delete
2238 ** the filters in the hardware.
2239 */
2240 void
2241 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2242 {
2243 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2244 	struct ixl_pf		*pf;
2245 	struct i40e_hw		*hw;
2246 	device_t		dev;
2247 	struct ixl_mac_filter	*f, *f_temp;
2248 	enum i40e_status_code	status;
2249 	int			j = 0;
2250 
2251 	pf = vsi->back;
2252 	hw = &pf->hw;
2253 	dev = iflib_get_dev(vsi->ctx);
2254 
2255 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2256 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2257 	if (d == NULL) {
2258 		device_printf(dev, "%s: failed to get memory\n", __func__);
2259 		return;
2260 	}
2261 
2262 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2263 		if (f->flags & IXL_FILTER_DEL) {
2264 			e = &d[j]; // a pox on fvl long names :)
2265 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2266 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2267 			if (f->vlan == IXL_VLAN_ANY) {
2268 				e->vlan_tag = 0;
2269 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2270 			} else {
2271 				e->vlan_tag = f->vlan;
2272 			}
2273 			/* delete entry from vsi list */
2274 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2275 			free(f, M_DEVBUF);
2276 			j++;
2277 		}
2278 		if (j == cnt)
2279 			break;
2280 	}
2281 	if (j > 0) {
2282 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2283 		if (status) {
2284 			int sc = 0;
2285 			for (int i = 0; i < j; i++)
2286 				sc += (!d[i].error_code); /* count successful removals */
2287 			vsi->hw_filters_del += sc;
2288 			device_printf(dev,
2289 			    "Failed to remove %d/%d filters, error %s\n",
2290 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2291 		} else
2292 			vsi->hw_filters_del += j;
2293 	}
2294 	free(d, M_DEVBUF);
2295 	return;
2296 }
2297 
2298 int
2299 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2300 {
2301 	struct i40e_hw	*hw = &pf->hw;
2302 	int		error = 0;
2303 	u32		reg;
2304 	u16		pf_qidx;
2305 
2306 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2307 
2308 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2309 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2310 	    pf_qidx, vsi_qidx);
2311 
2312 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2313 
2314 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2315 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2316 	    I40E_QTX_ENA_QENA_STAT_MASK;
2317 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2318 	/* Verify the enable took */
2319 	for (int j = 0; j < 10; j++) {
2320 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2321 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2322 			break;
2323 		i40e_usec_delay(10);
2324 	}
2325 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2326 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2327 		    pf_qidx);
2328 		error = ETIMEDOUT;
2329 	}
2330 
2331 	return (error);
2332 }
2333 
2334 int
2335 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2336 {
2337 	struct i40e_hw	*hw = &pf->hw;
2338 	int		error = 0;
2339 	u32		reg;
2340 	u16		pf_qidx;
2341 
2342 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2343 
2344 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2345 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2346 	    pf_qidx, vsi_qidx);
2347 
2348 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2349 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2350 	    I40E_QRX_ENA_QENA_STAT_MASK;
2351 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2352 	/* Verify the enable took */
2353 	for (int j = 0; j < 10; j++) {
2354 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2355 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2356 			break;
2357 		i40e_usec_delay(10);
2358 	}
2359 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2360 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2361 		    pf_qidx);
2362 		error = ETIMEDOUT;
2363 	}
2364 
2365 	return (error);
2366 }
2367 
2368 int
2369 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2370 {
2371 	int error = 0;
2372 
2373 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2374 	/* Called function already prints error message */
2375 	if (error)
2376 		return (error);
2377 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2378 	return (error);
2379 }
2380 
2381 /* For PF VSI only */
2382 int
2383 ixl_enable_rings(struct ixl_vsi *vsi)
2384 {
2385 	struct ixl_pf	*pf = vsi->back;
2386 	int		error = 0;
2387 
2388 	/* Note: only the status of the last ring enabled is returned */
2389 	for (int i = 0; i < vsi->num_tx_queues; i++)
2390 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2390 
2391 	for (int i = 0; i < vsi->num_rx_queues; i++)
2392 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2393 
2394 	return (error);
2395 }
2396 
2397 /*
2398  * Returns error on first ring that is detected hung.
2399  */
2400 int
2401 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2402 {
2403 	struct i40e_hw	*hw = &pf->hw;
2404 	int		error = 0;
2405 	u32		reg;
2406 	u16		pf_qidx;
2407 
2408 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2409 
2410 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2411 	i40e_usec_delay(500);
2412 
2413 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2414 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2415 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2416 	/* Verify the disable took */
2417 	for (int j = 0; j < 10; j++) {
2418 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2419 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2420 			break;
2421 		i40e_msec_delay(10);
2422 	}
2423 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2424 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2425 		    pf_qidx);
2426 		error = ETIMEDOUT;
2427 	}
2428 
2429 	return (error);
2430 }
2431 
2432 /*
2433  * Returns error on first ring that is detected hung.
2434  */
2435 int
2436 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2437 {
2438 	struct i40e_hw	*hw = &pf->hw;
2439 	int		error = 0;
2440 	u32		reg;
2441 	u16		pf_qidx;
2442 
2443 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2444 
2445 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2446 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2447 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2448 	/* Verify the disable took */
2449 	for (int j = 0; j < 10; j++) {
2450 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2451 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2452 			break;
2453 		i40e_msec_delay(10);
2454 	}
2455 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2456 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2457 		    pf_qidx);
2458 		error = ETIMEDOUT;
2459 	}
2460 
2461 	return (error);
2462 }
2463 
2464 int
2465 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2466 {
2467 	int error = 0;
2468 
2469 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2470 	/* Called function already prints error message */
2471 	if (error)
2472 		return (error);
2473 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2474 	return (error);
2475 }
2476 
2477 /* For PF VSI only */
2478 int
2479 ixl_disable_rings(struct ixl_vsi *vsi)
2480 {
2481 	struct ixl_pf	*pf = vsi->back;
2482 	int		error = 0;
2483 
2484 	/* Note: only the status of the last ring disabled is returned */
2485 	for (int i = 0; i < vsi->num_tx_queues; i++)
2486 		error = ixl_disable_tx_ring(pf, &pf->qtag, i);
2486 
2487 	for (int i = 0; i < vsi->num_rx_queues; i++)
2488 		error = ixl_disable_rx_ring(pf, &pf->qtag, i);
2489 
2490 	return (error);
2491 }
2492 
2493 /**
2494  * ixl_handle_mdd_event
2495  *
2496  * Called from the interrupt handler to identify possibly malicious VFs
2497  * (but it also detects events from the PF)
2498  **/
2499 void
2500 ixl_handle_mdd_event(struct ixl_pf *pf)
2501 {
2502 	struct i40e_hw *hw = &pf->hw;
2503 	device_t dev = pf->dev;
2504 	struct ixl_vf *vf;
2505 	bool mdd_detected = false;
2506 	bool pf_mdd_detected = false;
2507 	bool vf_mdd_detected = false;
2508 	u32 reg;
2509 
2510 	/* find what triggered the MDD event */
2511 	reg = rd32(hw, I40E_GL_MDET_TX);
2512 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2513 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2514 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
2515 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2516 				I40E_GL_MDET_TX_EVENT_SHIFT;
2517 		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2518 				I40E_GL_MDET_TX_QUEUE_SHIFT;
2519 		device_printf(dev,
2520 		    "Malicious Driver Detection event %d"
2521 		    " on TX queue %d, pf number %d\n",
2522 		    event, queue, pf_num);
2523 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2524 		mdd_detected = true;
2525 	}
2526 	reg = rd32(hw, I40E_GL_MDET_RX);
2527 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2528 		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2529 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
2530 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2531 				I40E_GL_MDET_RX_EVENT_SHIFT;
2532 		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2533 				I40E_GL_MDET_RX_QUEUE_SHIFT;
2534 		device_printf(dev,
2535 		    "Malicious Driver Detection event %d"
2536 		    " on RX queue %d, pf number %d\n",
2537 		    event, queue, pf_num);
2538 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2539 		mdd_detected = true;
2540 	}
2541 
2542 	if (mdd_detected) {
2543 		reg = rd32(hw, I40E_PF_MDET_TX);
2544 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2545 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2546 			device_printf(dev,
2547 			    "MDD TX event is for this function!\n");
2548 			pf_mdd_detected = true;
2549 		}
2550 		reg = rd32(hw, I40E_PF_MDET_RX);
2551 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2552 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2553 			device_printf(dev,
2554 			    "MDD RX event is for this function!\n");
2555 			pf_mdd_detected = true;
2556 		}
2557 	}
2558 
2559 	if (pf_mdd_detected) {
2560 		atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
2561 		goto end;
2562 	}
2563 
2564 	// Handle VF detection
2565 	for (int i = 0; i < pf->num_vfs && mdd_detected; i++) {
2566 		vf = &(pf->vfs[i]);
2567 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2568 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2569 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2570 			vf->num_mdd_events++;
2571 			device_printf(dev, "MDD TX event is for VF %d\n", i);
2572 			vf_mdd_detected = true;
2573 		}
2574 
2575 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2576 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2577 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2578 			vf->num_mdd_events++;
2579 			device_printf(dev, "MDD RX event is for VF %d\n", i);
2580 			vf_mdd_detected = true;
2581 		}
2582 
2583 		// TODO: Disable VF if there are too many MDD events from it
2584 	}
2585 
2586 	if (vf_mdd_detected)
2587 		atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
2588 
2589 end:
2590 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2591 
2592 	/* re-enable mdd interrupt cause */
2593 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2594 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2595 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2596 	ixl_flush(hw);
2597 }
2598 
2599 /* This only enables HW interrupts for the RX queues */
2600 void
2601 ixl_enable_intr(struct ixl_vsi *vsi)
2602 {
2603 	struct i40e_hw		*hw = vsi->hw;
2604 	struct ixl_rx_queue	*que = vsi->rx_queues;
2605 
2606 	// TODO: Check iflib interrupt mode instead?
2607 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2608 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2609 			ixl_enable_queue(hw, que->rxr.me);
2610 	} else
2611 		ixl_enable_intr0(hw);
2612 }
2613 
2614 void
2615 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2616 {
2617 	struct i40e_hw		*hw = vsi->hw;
2618 	struct ixl_rx_queue	*que = vsi->rx_queues;
2619 
2620 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2621 		ixl_disable_queue(hw, que->rxr.me);
2622 }
2623 
2624 void
2625 ixl_enable_intr0(struct i40e_hw *hw)
2626 {
2627 	u32		reg;
2628 
2629 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2630 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2631 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2632 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2633 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2634 }
2635 
2636 void
2637 ixl_disable_intr0(struct i40e_hw *hw)
2638 {
2639 	u32		reg;
2640 
2641 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2642 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2643 	ixl_flush(hw);
2644 }
2645 
2646 void
2647 ixl_enable_queue(struct i40e_hw *hw, int id)
2648 {
2649 	u32		reg;
2650 
2651 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2652 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2653 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2654 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2655 }
2656 
2657 void
2658 ixl_disable_queue(struct i40e_hw *hw, int id)
2659 {
2660 	u32		reg;
2661 
2662 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2663 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2664 }
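/*
 * In the four interrupt helpers above, enabling writes
 * INTENA | CLEARPBA (clearing any pending-bit-array entry on enable),
 * while disabling simply writes the register with INTENA left clear;
 * in both cases ITR_INDX is set to IXL_ITR_NONE so the write does not
 * disturb the configured throttling interval.
 */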
2665 
2666 void
2667 ixl_update_stats_counters(struct ixl_pf *pf)
2668 {
2669 	struct i40e_hw	*hw = &pf->hw;
2670 	struct ixl_vsi	*vsi = &pf->vsi;
2671 	struct ixl_vf	*vf;
2672 
2673 	struct i40e_hw_port_stats *nsd = &pf->stats;
2674 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2675 
2676 	/* Update hw stats */
2677 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2678 			   pf->stat_offsets_loaded,
2679 			   &osd->crc_errors, &nsd->crc_errors);
2680 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2681 			   pf->stat_offsets_loaded,
2682 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2683 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2684 			   I40E_GLPRT_GORCL(hw->port),
2685 			   pf->stat_offsets_loaded,
2686 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2687 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2688 			   I40E_GLPRT_GOTCL(hw->port),
2689 			   pf->stat_offsets_loaded,
2690 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2691 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2692 			   pf->stat_offsets_loaded,
2693 			   &osd->eth.rx_discards,
2694 			   &nsd->eth.rx_discards);
2695 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2696 			   I40E_GLPRT_UPRCL(hw->port),
2697 			   pf->stat_offsets_loaded,
2698 			   &osd->eth.rx_unicast,
2699 			   &nsd->eth.rx_unicast);
2700 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2701 			   I40E_GLPRT_UPTCL(hw->port),
2702 			   pf->stat_offsets_loaded,
2703 			   &osd->eth.tx_unicast,
2704 			   &nsd->eth.tx_unicast);
2705 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2706 			   I40E_GLPRT_MPRCL(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->eth.rx_multicast,
2709 			   &nsd->eth.rx_multicast);
2710 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2711 			   I40E_GLPRT_MPTCL(hw->port),
2712 			   pf->stat_offsets_loaded,
2713 			   &osd->eth.tx_multicast,
2714 			   &nsd->eth.tx_multicast);
2715 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2716 			   I40E_GLPRT_BPRCL(hw->port),
2717 			   pf->stat_offsets_loaded,
2718 			   &osd->eth.rx_broadcast,
2719 			   &nsd->eth.rx_broadcast);
2720 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2721 			   I40E_GLPRT_BPTCL(hw->port),
2722 			   pf->stat_offsets_loaded,
2723 			   &osd->eth.tx_broadcast,
2724 			   &nsd->eth.tx_broadcast);
2725 
2726 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2727 			   pf->stat_offsets_loaded,
2728 			   &osd->tx_dropped_link_down,
2729 			   &nsd->tx_dropped_link_down);
2730 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2731 			   pf->stat_offsets_loaded,
2732 			   &osd->mac_local_faults,
2733 			   &nsd->mac_local_faults);
2734 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2735 			   pf->stat_offsets_loaded,
2736 			   &osd->mac_remote_faults,
2737 			   &nsd->mac_remote_faults);
2738 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2739 			   pf->stat_offsets_loaded,
2740 			   &osd->rx_length_errors,
2741 			   &nsd->rx_length_errors);
2742 
2743 	/* Flow control (LFC) stats */
2744 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2745 			   pf->stat_offsets_loaded,
2746 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2747 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2748 			   pf->stat_offsets_loaded,
2749 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2750 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2751 			   pf->stat_offsets_loaded,
2752 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2753 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2754 			   pf->stat_offsets_loaded,
2755 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2756 
2757 	/* Packet size stats rx */
2758 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2759 			   I40E_GLPRT_PRC64L(hw->port),
2760 			   pf->stat_offsets_loaded,
2761 			   &osd->rx_size_64, &nsd->rx_size_64);
2762 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2763 			   I40E_GLPRT_PRC127L(hw->port),
2764 			   pf->stat_offsets_loaded,
2765 			   &osd->rx_size_127, &nsd->rx_size_127);
2766 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2767 			   I40E_GLPRT_PRC255L(hw->port),
2768 			   pf->stat_offsets_loaded,
2769 			   &osd->rx_size_255, &nsd->rx_size_255);
2770 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2771 			   I40E_GLPRT_PRC511L(hw->port),
2772 			   pf->stat_offsets_loaded,
2773 			   &osd->rx_size_511, &nsd->rx_size_511);
2774 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2775 			   I40E_GLPRT_PRC1023L(hw->port),
2776 			   pf->stat_offsets_loaded,
2777 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2778 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2779 			   I40E_GLPRT_PRC1522L(hw->port),
2780 			   pf->stat_offsets_loaded,
2781 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2782 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2783 			   I40E_GLPRT_PRC9522L(hw->port),
2784 			   pf->stat_offsets_loaded,
2785 			   &osd->rx_size_big, &nsd->rx_size_big);
2786 
2787 	/* Packet size stats tx */
2788 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2789 			   I40E_GLPRT_PTC64L(hw->port),
2790 			   pf->stat_offsets_loaded,
2791 			   &osd->tx_size_64, &nsd->tx_size_64);
2792 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2793 			   I40E_GLPRT_PTC127L(hw->port),
2794 			   pf->stat_offsets_loaded,
2795 			   &osd->tx_size_127, &nsd->tx_size_127);
2796 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2797 			   I40E_GLPRT_PTC255L(hw->port),
2798 			   pf->stat_offsets_loaded,
2799 			   &osd->tx_size_255, &nsd->tx_size_255);
2800 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2801 			   I40E_GLPRT_PTC511L(hw->port),
2802 			   pf->stat_offsets_loaded,
2803 			   &osd->tx_size_511, &nsd->tx_size_511);
2804 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2805 			   I40E_GLPRT_PTC1023L(hw->port),
2806 			   pf->stat_offsets_loaded,
2807 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2808 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2809 			   I40E_GLPRT_PTC1522L(hw->port),
2810 			   pf->stat_offsets_loaded,
2811 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2812 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2813 			   I40E_GLPRT_PTC9522L(hw->port),
2814 			   pf->stat_offsets_loaded,
2815 			   &osd->tx_size_big, &nsd->tx_size_big);
2816 
2817 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2818 			   pf->stat_offsets_loaded,
2819 			   &osd->rx_undersize, &nsd->rx_undersize);
2820 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2821 			   pf->stat_offsets_loaded,
2822 			   &osd->rx_fragments, &nsd->rx_fragments);
2823 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2824 			   pf->stat_offsets_loaded,
2825 			   &osd->rx_oversize, &nsd->rx_oversize);
2826 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2827 			   pf->stat_offsets_loaded,
2828 			   &osd->rx_jabber, &nsd->rx_jabber);
2829 	pf->stat_offsets_loaded = true;
2830 	/* End hw stats */
2831 
2832 	/* Update vsi stats */
2833 	ixl_update_vsi_stats(vsi);
2834 
2835 	for (int i = 0; i < pf->num_vfs; i++) {
2836 		vf = &pf->vfs[i];
2837 		if (vf->vf_flags & VF_FLAG_ENABLED)
2838 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2839 	}
2840 }
2841 
2842 int
2843 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2844 {
2845 	struct i40e_hw *hw = &pf->hw;
2846 	device_t dev = pf->dev;
2847 	int error = 0;
2848 
2849 	error = i40e_shutdown_lan_hmc(hw);
2850 	if (error)
2851 		device_printf(dev,
2852 		    "Shutdown LAN HMC failed with code %d\n", error);
2853 
2854 	ixl_disable_intr0(hw);
2855 
2856 	error = i40e_shutdown_adminq(hw);
2857 	if (error)
2858 		device_printf(dev,
2859 		    "Shutdown Admin queue failed with code %d\n", error);
2860 
2861 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2862 	return (error);
2863 }
2864 
2865 int
2866 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
2867 {
2868 	struct i40e_hw *hw = &pf->hw;
2869 	struct ixl_vsi *vsi = &pf->vsi;
2870 	device_t dev = pf->dev;
2871 	int error = 0;
2872 
2873 	device_printf(dev, "Rebuilding driver state...\n");
2874 
2875 	error = i40e_pf_reset(hw);
2876 	if (error) {
2877 		device_printf(dev, "PF reset failure %s\n",
2878 		    i40e_stat_str(hw, error));
2879 		goto ixl_rebuild_hw_structs_after_reset_err;
2880 	}
2881 
2882 	/* Setup */
2883 	error = i40e_init_adminq(hw);
2884 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2885 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2886 		    error);
2887 		goto ixl_rebuild_hw_structs_after_reset_err;
2888 	}
2889 
2890 	i40e_clear_pxe_mode(hw);
2891 
2892 	error = ixl_get_hw_capabilities(pf);
2893 	if (error) {
2894 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2895 		goto ixl_rebuild_hw_structs_after_reset_err;
2896 	}
2897 
2898 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2899 	    hw->func_caps.num_rx_qp, 0, 0);
2900 	if (error) {
2901 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2902 		goto ixl_rebuild_hw_structs_after_reset_err;
2903 	}
2904 
2905 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2906 	if (error) {
2907 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2908 		goto ixl_rebuild_hw_structs_after_reset_err;
2909 	}
2910 
2911 	/* reserve a contiguous allocation for the PF's VSI */
2912 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2913 	if (error) {
2914 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2915 		    error);
2916 		/* TODO: error handling */
2917 	}
2918 
2919 	error = ixl_switch_config(pf);
2920 	if (error) {
2921 		device_printf(dev, "%s: ixl_switch_config() failed: %d\n",
2922 		    __func__, error);
2923 		goto ixl_rebuild_hw_structs_after_reset_err;
2924 	}
2925 
2926 	/* Remove default filters reinstalled by FW on reset */
2927 	ixl_del_default_hw_filters(vsi);
2928 
2929 	/* Determine link state */
2930 	if (ixl_attach_get_link_status(pf)) {
2931 		error = EINVAL;
2932 		/* TODO: error handling */
2933 	}
2934 
2935 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2936 	ixl_get_fw_lldp_status(pf);
2937 
2938 	/* Keep admin queue interrupts active while driver is loaded */
2939 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2940 		ixl_configure_intr0_msix(pf);
2941 		ixl_enable_intr0(hw);
2942 	}
2943 
2944 	device_printf(dev, "Rebuilding driver state done.\n");
2945 	return (0);
2946 
2947 ixl_rebuild_hw_structs_after_reset_err:
2948 	device_printf(dev, "Reload the driver to recover\n");
2949 	return (error);
2950 }
2951 
2952 void
2953 ixl_handle_empr_reset(struct ixl_pf *pf)
2954 {
2955 	struct ixl_vsi	*vsi = &pf->vsi;
2956 	struct i40e_hw	*hw = &pf->hw;
2957 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2958 	int count = 0;
2959 	u32 reg;
2960 
2961 	ixl_prepare_for_reset(pf, is_up);
2962 
2963 	/* Typically finishes within 3-4 seconds; poll for up to 10s (100 x 100ms) */
2964 	while (count++ < 100) {
2965 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2966 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2967 		if (reg)
2968 			i40e_msec_delay(100);
2969 		else
2970 			break;
2971 	}
2972 	ixl_dbg(pf, IXL_DBG_INFO,
2973 			"Reset wait count: %d\n", count);
2974 
2975 	ixl_rebuild_hw_structs_after_reset(pf, is_up);
2976 
2977 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2978 }
2979 
2980 /**
2981  * Update VSI-specific ethernet statistics counters.
2982  **/
2983 void
2984 ixl_update_eth_stats(struct ixl_vsi *vsi)
2985 {
2986 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2987 	struct i40e_hw *hw = &pf->hw;
2988 	struct i40e_eth_stats *es;
2989 	struct i40e_eth_stats *oes;
2990 	struct i40e_hw_port_stats *nsd;
2991 	u16 stat_idx = vsi->info.stat_counter_idx;
2992 
2993 	es = &vsi->eth_stats;
2994 	oes = &vsi->eth_stats_offsets;
2995 	nsd = &pf->stats;
2996 
2997 	/* Gather up the stats that the hw collects */
2998 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2999 			   vsi->stat_offsets_loaded,
3000 			   &oes->tx_errors, &es->tx_errors);
3001 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
3002 			   vsi->stat_offsets_loaded,
3003 			   &oes->rx_discards, &es->rx_discards);
3004 
3005 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
3006 			   I40E_GLV_GORCL(stat_idx),
3007 			   vsi->stat_offsets_loaded,
3008 			   &oes->rx_bytes, &es->rx_bytes);
3009 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
3010 			   I40E_GLV_UPRCL(stat_idx),
3011 			   vsi->stat_offsets_loaded,
3012 			   &oes->rx_unicast, &es->rx_unicast);
3013 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
3014 			   I40E_GLV_MPRCL(stat_idx),
3015 			   vsi->stat_offsets_loaded,
3016 			   &oes->rx_multicast, &es->rx_multicast);
3017 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
3018 			   I40E_GLV_BPRCL(stat_idx),
3019 			   vsi->stat_offsets_loaded,
3020 			   &oes->rx_broadcast, &es->rx_broadcast);
3021 
3022 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
3023 			   I40E_GLV_GOTCL(stat_idx),
3024 			   vsi->stat_offsets_loaded,
3025 			   &oes->tx_bytes, &es->tx_bytes);
3026 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
3027 			   I40E_GLV_UPTCL(stat_idx),
3028 			   vsi->stat_offsets_loaded,
3029 			   &oes->tx_unicast, &es->tx_unicast);
3030 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
3031 			   I40E_GLV_MPTCL(stat_idx),
3032 			   vsi->stat_offsets_loaded,
3033 			   &oes->tx_multicast, &es->tx_multicast);
3034 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
3035 			   I40E_GLV_BPTCL(stat_idx),
3036 			   vsi->stat_offsets_loaded,
3037 			   &oes->tx_broadcast, &es->tx_broadcast);
3038 	vsi->stat_offsets_loaded = true;
3039 }
3040 
3041 void
3042 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3043 {
3044 	struct ixl_pf		*pf;
3045 	struct ifnet		*ifp;
3046 	struct i40e_eth_stats	*es;
3047 	u64			tx_discards;
3048 
3049 	struct i40e_hw_port_stats *nsd;
3050 
3051 	pf = vsi->back;
3052 	ifp = vsi->ifp;
3053 	es = &vsi->eth_stats;
3054 	nsd = &pf->stats;
3055 
3056 	ixl_update_eth_stats(vsi);
3057 
3058 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3059 
3060 	/* Update ifnet stats */
3061 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3062 	                   es->rx_multicast +
3063 			   es->rx_broadcast);
3064 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3065 	                   es->tx_multicast +
3066 			   es->tx_broadcast);
3067 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3068 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3069 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3070 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3071 
3072 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3073 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3074 	    nsd->rx_jabber);
3075 	IXL_SET_OERRORS(vsi, es->tx_errors);
3076 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3077 	IXL_SET_OQDROPS(vsi, tx_discards);
3078 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3079 	IXL_SET_COLLISIONS(vsi, 0);
3080 }
3081 
3082 /**
3083  * Reset all of the stats for the given pf
3084  **/
3085 void
3086 ixl_pf_reset_stats(struct ixl_pf *pf)
3087 {
3088 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3089 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3090 	pf->stat_offsets_loaded = false;
3091 }
3092 
3093 /**
3094  * Resets all stats of the given vsi
3095  **/
3096 void
3097 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3098 {
3099 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3100 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3101 	vsi->stat_offsets_loaded = false;
3102 }
3103 
3104 /**
3105  * Read and update a 48 bit stat from the hw
3106  *
3107  * Since the device stats are not reset at PFReset, they likely will not
3108  * be zeroed when the driver starts.  We'll save the first values read
3109  * and use them as offsets to be subtracted from the raw values in order
3110  * to report stats that count from zero.
3111  **/
3112 void
3113 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3114 	bool offset_loaded, u64 *offset, u64 *stat)
3115 {
3116 	u64 new_data;
3117 
3118 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3119 	new_data = rd64(hw, loreg);
3120 #else
3121 	/*
3122 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3123 	 * 10 don't support 64-bit bus reads/writes.
3124 	 */
3125 	new_data = rd32(hw, loreg);
3126 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3127 #endif
3128 
3129 	if (!offset_loaded)
3130 		*offset = new_data;
3131 	if (new_data >= *offset)
3132 		*stat = new_data - *offset;
3133 	else
3134 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3135 	*stat &= 0xFFFFFFFFFFFFULL;
3136 }
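/*
 * Worked wrap-around example for the math above: with
 * *offset == 0xFFFFFFFFFFF0 and a post-rollover read of
 * new_data == 0x10, new_data < *offset, so
 * *stat = (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32
 * events since the offset snapshot; the final mask keeps the result
 * within 48 bits.
 */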
3137 
3138 /**
3139  * Read and update a 32 bit stat from the hw
3140  **/
3141 void
3142 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3143 	bool offset_loaded, u64 *offset, u64 *stat)
3144 {
3145 	u32 new_data;
3146 
3147 	new_data = rd32(hw, reg);
3148 	if (!offset_loaded)
3149 		*offset = new_data;
3150 	if (new_data >= *offset)
3151 		*stat = (u32)(new_data - *offset);
3152 	else
3153 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3154 }
3155 
3156 void
3157 ixl_add_device_sysctls(struct ixl_pf *pf)
3158 {
3159 	device_t dev = pf->dev;
3160 	struct i40e_hw *hw = &pf->hw;
3161 
3162 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3163 	struct sysctl_oid_list *ctx_list =
3164 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3165 
3166 	struct sysctl_oid *debug_node;
3167 	struct sysctl_oid_list *debug_list;
3168 
3169 	struct sysctl_oid *fec_node;
3170 	struct sysctl_oid_list *fec_list;
3171 
3172 	/* Set up sysctls */
3173 	SYSCTL_ADD_PROC(ctx, ctx_list,
3174 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3175 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3176 
3177 	SYSCTL_ADD_PROC(ctx, ctx_list,
3178 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3179 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3180 
3181 	SYSCTL_ADD_PROC(ctx, ctx_list,
3182 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3183 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3184 
3185 	SYSCTL_ADD_PROC(ctx, ctx_list,
3186 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3187 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3188 
3189 	SYSCTL_ADD_PROC(ctx, ctx_list,
3190 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3191 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3192 
3193 	SYSCTL_ADD_PROC(ctx, ctx_list,
3194 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3195 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3196 	    "Queues not allocated to a PF or VF");
3197 
3198 	SYSCTL_ADD_PROC(ctx, ctx_list,
3199 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3200 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3201 	    "Immediately set TX ITR value for all queues");
3202 
3203 	SYSCTL_ADD_PROC(ctx, ctx_list,
3204 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3205 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3206 	    "Immediately set RX ITR value for all queues");
3207 
3208 	SYSCTL_ADD_INT(ctx, ctx_list,
3209 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3210 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3211 
3212 	SYSCTL_ADD_INT(ctx, ctx_list,
3213 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3214 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3215 
3216 	/* Add FEC sysctls for 25G adapters */
3217 	if (i40e_is_25G_device(hw->device_id)) {
3218 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3219 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3220 		fec_list = SYSCTL_CHILDREN(fec_node);
3221 
3222 		SYSCTL_ADD_PROC(ctx, fec_list,
3223 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3224 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3225 
3226 		SYSCTL_ADD_PROC(ctx, fec_list,
3227 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3228 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3229 
3230 		SYSCTL_ADD_PROC(ctx, fec_list,
3231 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3232 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3233 
3234 		SYSCTL_ADD_PROC(ctx, fec_list,
3235 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3236 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3237 
3238 		SYSCTL_ADD_PROC(ctx, fec_list,
3239 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3240 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3241 	}
3242 
3243 	SYSCTL_ADD_PROC(ctx, ctx_list,
3244 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3245 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3246 
3247 	/* Add sysctls meant to print debug information, but don't list them
3248 	 * in "sysctl -a" output. */
3249 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3250 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3251 	debug_list = SYSCTL_CHILDREN(debug_node);
3252 
3253 	SYSCTL_ADD_UINT(ctx, debug_list,
3254 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3255 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3256 
3257 	SYSCTL_ADD_UINT(ctx, debug_list,
3258 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3259 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3260 
3261 	SYSCTL_ADD_PROC(ctx, debug_list,
3262 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3263 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3264 
3265 	SYSCTL_ADD_PROC(ctx, debug_list,
3266 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3267 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3268 
3269 	SYSCTL_ADD_PROC(ctx, debug_list,
3270 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3271 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3272 
3273 	SYSCTL_ADD_PROC(ctx, debug_list,
3274 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3275 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3276 
3277 	SYSCTL_ADD_PROC(ctx, debug_list,
3278 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3279 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3280 
3281 	SYSCTL_ADD_PROC(ctx, debug_list,
3282 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3283 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3284 
3285 	SYSCTL_ADD_PROC(ctx, debug_list,
3286 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3287 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3288 
3289 	SYSCTL_ADD_PROC(ctx, debug_list,
3290 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3291 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3292 
3293 	SYSCTL_ADD_PROC(ctx, debug_list,
3294 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3295 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3296 
3297 	SYSCTL_ADD_PROC(ctx, debug_list,
3298 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3299 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3300 
3301 	SYSCTL_ADD_PROC(ctx, debug_list,
3302 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3303 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3304 
3305 	SYSCTL_ADD_PROC(ctx, debug_list,
3306 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3307 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3308 
3309 	SYSCTL_ADD_PROC(ctx, debug_list,
3310 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3311 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3312 
3313 	SYSCTL_ADD_PROC(ctx, debug_list,
3314 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3315 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3316 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3317 
3318 	SYSCTL_ADD_PROC(ctx, debug_list,
3319 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3320 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3321 
3322 	if (pf->has_i2c) {
3323 		SYSCTL_ADD_PROC(ctx, debug_list,
3324 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3325 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3326 
3327 		SYSCTL_ADD_PROC(ctx, debug_list,
3328 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3329 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3330 
3331 		SYSCTL_ADD_PROC(ctx, debug_list,
3332 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3333 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3334 	}
3335 
3336 #ifdef PCI_IOV
3337 	SYSCTL_ADD_UINT(ctx, debug_list,
3338 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
3339 	    0, "PF/VF Virtual Channel debug level");
3340 #endif
3341 }
3342 
3343 /*
3344  * Primarily for finding out how many queues can be assigned to VFs,
3345  * at runtime.
3346  */
3347 static int
3348 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3349 {
3350 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3351 	int queues;
3352 
3353 	//IXL_PF_LOCK(pf);
3354 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3355 	//IXL_PF_UNLOCK(pf);
3356 
3357 	return sysctl_handle_int(oidp, NULL, queues, req);
3358 }
3359 
3360 /*
3361 ** Set flow control using sysctl:
3362 ** 	0 - off
3363 **	1 - rx pause
3364 **	2 - tx pause
3365 **	3 - full
3366 */
3367 int
3368 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3369 {
3370 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3371 	struct i40e_hw *hw = &pf->hw;
3372 	device_t dev = pf->dev;
3373 	int requested_fc, error = 0;
3374 	enum i40e_status_code aq_error = 0;
3375 	u8 fc_aq_err = 0;
3376 
3377 	/* Get request */
3378 	requested_fc = pf->fc;
3379 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3380 	if ((error) || (req->newptr == NULL))
3381 		return (error);
3382 	if (requested_fc < 0 || requested_fc > 3) {
3383 		device_printf(dev,
3384 		    "Invalid fc mode; valid modes are 0 through 3\n");
3385 		return (EINVAL);
3386 	}
3387 
3388 	/* Set fc ability for port */
3389 	hw->fc.requested_mode = requested_fc;
3390 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3391 	if (aq_error) {
3392 		device_printf(dev,
3393 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3394 		    __func__, aq_error, fc_aq_err);
3395 		return (EIO);
3396 	}
3397 	pf->fc = requested_fc;
3398 
3399 	return (0);
3400 }
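
/*
 * Example (illustrative; assumes the OID is attached as
 * dev.ixl.<unit>.fc, as elsewhere in this driver):
 *
 *	# sysctl dev.ixl.0.fc		(read the current flow control mode)
 *	# sysctl dev.ixl.0.fc=3		(request full rx + tx pause)
 */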
3401 
3402 char *
3403 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3404 {
3405 	int index;
3406 
	static char *speeds[] = {
3408 		"Unknown",
3409 		"100 Mbps",
3410 		"1 Gbps",
3411 		"10 Gbps",
3412 		"40 Gbps",
3413 		"20 Gbps",
3414 		"25 Gbps",
3415 	};
3416 
3417 	switch (link_speed) {
3418 	case I40E_LINK_SPEED_100MB:
3419 		index = 1;
3420 		break;
3421 	case I40E_LINK_SPEED_1GB:
3422 		index = 2;
3423 		break;
3424 	case I40E_LINK_SPEED_10GB:
3425 		index = 3;
3426 		break;
3427 	case I40E_LINK_SPEED_40GB:
3428 		index = 4;
3429 		break;
3430 	case I40E_LINK_SPEED_20GB:
3431 		index = 5;
3432 		break;
3433 	case I40E_LINK_SPEED_25GB:
3434 		index = 6;
3435 		break;
3436 	case I40E_LINK_SPEED_UNKNOWN:
3437 	default:
3438 		index = 0;
3439 		break;
3440 	}
3441 
3442 	return speeds[index];
3443 }
3444 
3445 int
3446 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3447 {
3448 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3449 	struct i40e_hw *hw = &pf->hw;
3450 	int error = 0;
3451 
3452 	ixl_update_link_status(pf);
3453 
3454 	error = sysctl_handle_string(oidp,
3455 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3456 	    8, req);
3457 	return (error);
3458 }
3459 
3460 /*
 * Converts an 8-bit speeds bitmask between the sysctl flag encoding
 * and the Admin Queue link-speed encoding.
3463  */
3464 static u8
3465 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3466 {
3467 	static u16 speedmap[6] = {
3468 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3469 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3470 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3471 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3472 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3473 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3474 	};
3475 	u8 retval = 0;
3476 
3477 	for (int i = 0; i < 6; i++) {
3478 		if (to_aq)
3479 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3480 		else
3481 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3482 	}
3483 
3484 	return (retval);
3485 }
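
#if 0
/*
 * Illustrative sketch (not compiled into the driver): the speed map
 * above is a bijection, so converting to AQ flags and back must return
 * the original sysctl flags.
 */
static void
ixl_speedmap_selftest(void)
{
	u8 sysctl_flags = 0x3F;	/* all six sysctl speed flags */
	u8 aq_flags = ixl_convert_sysctl_aq_link_speed(sysctl_flags, true);

	KASSERT(ixl_convert_sysctl_aq_link_speed(aq_flags, false) == sysctl_flags,
	    ("sysctl<->AQ speed conversion did not round-trip"));
}
#endif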
3486 
3487 int
3488 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3489 {
3490 	struct i40e_hw *hw = &pf->hw;
3491 	device_t dev = pf->dev;
3492 	struct i40e_aq_get_phy_abilities_resp abilities;
3493 	struct i40e_aq_set_phy_config config;
3494 	enum i40e_status_code aq_error = 0;
3495 
3496 	/* Get current capability information */
3497 	aq_error = i40e_aq_get_phy_capabilities(hw,
3498 	    FALSE, FALSE, &abilities, NULL);
3499 	if (aq_error) {
3500 		device_printf(dev,
3501 		    "%s: Error getting phy capabilities %d,"
3502 		    " aq error: %d\n", __func__, aq_error,
3503 		    hw->aq.asq_last_status);
3504 		return (EIO);
3505 	}
3506 
3507 	/* Prepare new config */
3508 	bzero(&config, sizeof(config));
3509 	if (from_aq)
3510 		config.link_speed = speeds;
3511 	else
3512 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3513 	config.phy_type = abilities.phy_type;
3514 	config.phy_type_ext = abilities.phy_type_ext;
3515 	config.abilities = abilities.abilities
3516 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3517 	config.eee_capability = abilities.eee_capability;
3518 	config.eeer = abilities.eeer_val;
3519 	config.low_power_ctrl = abilities.d3_lpan;
3520 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3521 
3522 	/* Do aq command & restart link */
3523 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3524 	if (aq_error) {
3525 		device_printf(dev,
3526 		    "%s: Error setting new phy config %d,"
3527 		    " aq error: %d\n", __func__, aq_error,
3528 		    hw->aq.asq_last_status);
3529 		return (EIO);
3530 	}
3531 
3532 	return (0);
3533 }
3534 
3535 /*
** Supported link speeds:
3537 **	Flags:
3538 **	 0x1 - 100 Mb
3539 **	 0x2 - 1G
3540 **	 0x4 - 10G
3541 **	 0x8 - 20G
3542 **	0x10 - 25G
3543 **	0x20 - 40G
3544 */
3545 static int
3546 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3547 {
3548 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3549 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3550 
3551 	return sysctl_handle_int(oidp, NULL, supported, req);
3552 }
3553 
3554 /*
3555 ** Control link advertise speed:
3556 **	Flags:
3557 **	 0x1 - advertise 100 Mb
3558 **	 0x2 - advertise 1G
3559 **	 0x4 - advertise 10G
3560 **	 0x8 - advertise 20G
3561 **	0x10 - advertise 25G
3562 **	0x20 - advertise 40G
3563 **
3564 **	Set to 0 to disable link
3565 */
3566 int
3567 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3568 {
3569 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3570 	device_t dev = pf->dev;
3571 	u8 converted_speeds;
3572 	int requested_ls = 0;
3573 	int error = 0;
3574 
3575 	/* Read in new mode */
3576 	requested_ls = pf->advertised_speed;
3577 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3578 	if ((error) || (req->newptr == NULL))
3579 		return (error);
3580 
3581 	/* Error out if bits outside of possible flag range are set */
3582 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3583 		device_printf(dev, "Input advertised speed out of range; "
3584 		    "valid flags are: 0x%02x\n",
3585 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3586 		return (EINVAL);
3587 	}
3588 
3589 	/* Check if adapter supports input value */
3590 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3591 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3592 		device_printf(dev, "Invalid advertised speed; "
3593 		    "valid flags are: 0x%02x\n",
3594 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3595 		return (EINVAL);
3596 	}
3597 
3598 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3599 	if (error)
3600 		return (error);
3601 
3602 	pf->advertised_speed = requested_ls;
3603 	ixl_update_link_status(pf);
3604 	return (0);
3605 }
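
/*
 * Example (illustrative): advertising only 10G and 40G means writing
 * the OR of the two flags, 0x4 | 0x20 = 0x24, to this sysctl; writing
 * 0 brings the link down, per the comment above.
 */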
3606 
3607 /*
3608 ** Get the width and transaction speed of
3609 ** the bus this adapter is plugged into.
3610 */
3611 void
3612 ixl_get_bus_info(struct ixl_pf *pf)
3613 {
3614 	struct i40e_hw *hw = &pf->hw;
3615 	device_t dev = pf->dev;
	u16 link;
	u32 offset, num_ports;
3618 	u64 max_speed;
3619 
3620 	/* Some devices don't use PCIE */
3621 	if (hw->mac.type == I40E_MAC_X722)
3622 		return;
3623 
	/* Read PCI Express Capabilities Link Status Register */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3627 
3628 	/* Fill out hw struct with PCIE info */
3629 	i40e_set_pci_config_data(hw, link);
3630 
3631 	/* Use info to print out bandwidth messages */
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));
3641 
3642 	/*
3643 	 * If adapter is in slot with maximum supported speed,
3644 	 * no warning message needs to be printed out.
3645 	 */
3646 	if (hw->bus.speed >= i40e_bus_speed_8000
3647 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3648 		return;
3649 
3650 	num_ports = bitcount32(hw->func_caps.valid_functions);
3651 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3652 
3653 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device may be insufficient for"
		    " optimal performance.\n");
		device_printf(dev, "Please move the device to a different"
		    " PCI-e link with more lanes and/or higher"
		    " transfer rate.\n");
	}
3661 }
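
/*
 * Worked example for the check above (illustrative numbers; the enum
 * values make this an approximate comparison that ignores PCIe encoding
 * overhead): a 4-port 10G adapter needs 4 * 10000 = 40000 units of
 * bandwidth.  In a 5.0GT/s x8 slot, 40000 > 5000 * 8 is false, so no
 * warning is printed; in a 2.5GT/s x8 slot, 40000 > 2500 * 8 is true
 * and both messages appear.
 */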
3662 
3663 static int
3664 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3665 {
3666 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3667 	struct i40e_hw	*hw = &pf->hw;
3668 	struct sbuf	*sbuf;
3669 
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sbuf == NULL) {
		device_printf(pf->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
3671 	ixl_nvm_version_str(hw, sbuf);
3672 	sbuf_finish(sbuf);
3673 	sbuf_delete(sbuf);
3674 
3675 	return (0);
3676 }
3677 
3678 void
3679 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3680 {
3681 	if ((nvma->command == I40E_NVM_READ) &&
3682 	    ((nvma->config & 0xFF) == 0xF) &&
3683 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3684 	    (nvma->offset == 0) &&
3685 	    (nvma->data_size == 1)) {
		/* Get Driver Status command; intentionally not logged */
	} else if (nvma->command == I40E_NVM_READ) {
		/* Plain read commands are intentionally not logged */
	} else {
3692 		switch (nvma->command) {
3693 		case 0xB:
3694 			device_printf(dev, "- command: I40E_NVM_READ\n");
3695 			break;
3696 		case 0xC:
3697 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3698 			break;
3699 		default:
3700 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3701 			break;
3702 		}
3703 
3704 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3705 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3706 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3707 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3708 	}
3709 }
3710 
3711 int
3712 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3713 {
3714 	struct i40e_hw *hw = &pf->hw;
3715 	struct i40e_nvm_access *nvma;
3716 	device_t dev = pf->dev;
3717 	enum i40e_status_code status = 0;
3718 	int perrno;
3719 
3720 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3721 
3722 	/* Sanity checks */
3723 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
3724 	    ifd->ifd_data == NULL) {
3725 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3726 		    __func__);
3727 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3728 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
3729 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3730 		    ifd->ifd_data);
3731 		return (EINVAL);
3732 	}
3733 
3734 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
3735 
3736 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3737 		ixl_print_nvm_cmd(dev, nvma);
3738 
	/* Wait up to 10 seconds (100 * 100 ms) for a pending reset to finish */
	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3740 		int count = 0;
3741 		while (count++ < 100) {
3742 			i40e_msec_delay(100);
3743 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3744 				break;
3745 		}
3746 	}
3747 
3748 	if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) {
3749 		// TODO: Might need a different lock here
3750 		// IXL_PF_LOCK(pf);
3751 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3752 		// IXL_PF_UNLOCK(pf);
3753 	} else {
3754 		perrno = -EBUSY;
3755 	}
3756 
3757 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3758 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3759 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3760 		    i40e_stat_str(hw, status), perrno);
3761 
3762 	/*
3763 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3764 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3765 	 */
3766 	if (perrno == -EPERM)
3767 		return (-EACCES);
3768 	else
3769 		return (perrno);
3770 }
3771 
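/*
 * Scan the four MDIO/I2C select registers and return the index of the
 * first one that has its I2C enable bit set and whose PHY port bitmap
 * includes this PF's port, or -1 if none matches.
 */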
3772 int
3773 ixl_find_i2c_interface(struct ixl_pf *pf)
3774 {
3775 	struct i40e_hw *hw = &pf->hw;
3776 	bool i2c_en, port_matched;
3777 	u32 reg;
3778 
3779 	for (int i = 0; i < 4; i++) {
3780 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3781 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3782 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3783 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3784 		    & BIT(hw->port);
3785 		if (i2c_en && port_matched)
3786 			return (i);
3787 	}
3788 
3789 	return (-1);
3790 }
3791 
3792 static char *
3793 ixl_phy_type_string(u32 bit_pos, bool ext)
3794 {
3795 	static char * phy_types_str[32] = {
3796 		"SGMII",
3797 		"1000BASE-KX",
3798 		"10GBASE-KX4",
3799 		"10GBASE-KR",
3800 		"40GBASE-KR4",
3801 		"XAUI",
3802 		"XFI",
3803 		"SFI",
3804 		"XLAUI",
3805 		"XLPPI",
3806 		"40GBASE-CR4",
3807 		"10GBASE-CR1",
3808 		"SFP+ Active DA",
3809 		"QSFP+ Active DA",
3810 		"Reserved (14)",
3811 		"Reserved (15)",
3812 		"Reserved (16)",
3813 		"100BASE-TX",
3814 		"1000BASE-T",
3815 		"10GBASE-T",
3816 		"10GBASE-SR",
3817 		"10GBASE-LR",
3818 		"10GBASE-SFP+Cu",
3819 		"10GBASE-CR1",
3820 		"40GBASE-CR4",
3821 		"40GBASE-SR4",
3822 		"40GBASE-LR4",
3823 		"1000BASE-SX",
3824 		"1000BASE-LX",
3825 		"1000BASE-T Optical",
3826 		"20GBASE-KR2",
3827 		"Reserved (31)"
3828 	};
3829 	static char * ext_phy_types_str[8] = {
3830 		"25GBASE-KR",
3831 		"25GBASE-CR",
3832 		"25GBASE-SR",
3833 		"25GBASE-LR",
3834 		"25GBASE-AOC",
3835 		"25GBASE-ACC",
3836 		"Reserved (6)",
3837 		"Reserved (7)"
3838 	};
3839 
3840 	if (ext && bit_pos > 7) return "Invalid_Ext";
3841 	if (bit_pos > 31) return "Invalid";
3842 
3843 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3844 }
3845 
/* TODO: ERJ: I don't think this is necessary anymore. */
3847 int
3848 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3849 {
3850 	device_t dev = pf->dev;
3851 	struct i40e_hw *hw = &pf->hw;
3852 	struct i40e_aq_desc desc;
3853 	enum i40e_status_code status;
3854 
3855 	struct i40e_aqc_get_link_status *aq_link_status =
3856 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3857 
3858 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
	/* Set the flag in the descriptor's command copy, not the caller's buffer */
	aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3860 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3861 	if (status) {
3862 		device_printf(dev,
3863 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3864 		    __func__, i40e_stat_str(hw, status),
3865 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3866 		return (EIO);
3867 	}
3868 
3869 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3870 	return (0);
3871 }
3872 
3873 static char *
3874 ixl_phy_type_string_ls(u8 val)
3875 {
3876 	if (val >= 0x1F)
3877 		return ixl_phy_type_string(val - 0x1F, true);
3878 	else
3879 		return ixl_phy_type_string(val, false);
3880 }
3881 
3882 static int
3883 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3884 {
3885 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3886 	device_t dev = pf->dev;
3887 	struct sbuf *buf;
3888 	int error = 0;
3889 
3890 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3891 	if (!buf) {
3892 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3893 		return (ENOMEM);
3894 	}
3895 
3896 	struct i40e_aqc_get_link_status link_status;
3897 	error = ixl_aq_get_link_status(pf, &link_status);
3898 	if (error) {
3899 		sbuf_delete(buf);
3900 		return (error);
3901 	}
3902 
3903 	sbuf_printf(buf, "\n"
3904 	    "PHY Type : 0x%02x<%s>\n"
3905 	    "Speed    : 0x%02x\n"
3906 	    "Link info: 0x%02x\n"
3907 	    "AN info  : 0x%02x\n"
3908 	    "Ext info : 0x%02x\n"
3909 	    "Loopback : 0x%02x\n"
3910 	    "Max Frame: %d\n"
3911 	    "Config   : 0x%02x\n"
3912 	    "Power    : 0x%02x",
3913 	    link_status.phy_type,
3914 	    ixl_phy_type_string_ls(link_status.phy_type),
3915 	    link_status.link_speed,
3916 	    link_status.link_info,
3917 	    link_status.an_info,
3918 	    link_status.ext_info,
3919 	    link_status.loopback,
3920 	    link_status.max_frame_size,
3921 	    link_status.config,
3922 	    link_status.power_desc);
3923 
3924 	error = sbuf_finish(buf);
3925 	if (error)
3926 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3927 
3928 	sbuf_delete(buf);
3929 	return (error);
3930 }
3931 
3932 static int
3933 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3934 {
3935 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3936 	struct i40e_hw *hw = &pf->hw;
3937 	device_t dev = pf->dev;
3938 	enum i40e_status_code status;
3939 	struct i40e_aq_get_phy_abilities_resp abilities;
3940 	struct sbuf *buf;
3941 	int error = 0;
3942 
3943 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3944 	if (!buf) {
3945 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3946 		return (ENOMEM);
3947 	}
3948 
3949 	status = i40e_aq_get_phy_capabilities(hw,
3950 	    FALSE, FALSE, &abilities, NULL);
3951 	if (status) {
3952 		device_printf(dev,
3953 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3954 		    __func__, i40e_stat_str(hw, status),
3955 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3956 		sbuf_delete(buf);
3957 		return (EIO);
3958 	}
3959 
3960 	sbuf_printf(buf, "\n"
3961 	    "PHY Type : %08x",
3962 	    abilities.phy_type);
3963 
3964 	if (abilities.phy_type != 0) {
3965 		sbuf_printf(buf, "<");
3966 		for (int i = 0; i < 32; i++)
3967 			if ((1 << i) & abilities.phy_type)
3968 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3969 		sbuf_printf(buf, ">\n");
3970 	}
3971 
3972 	sbuf_printf(buf, "PHY Ext  : %02x",
3973 	    abilities.phy_type_ext);
3974 
3975 	if (abilities.phy_type_ext != 0) {
3976 		sbuf_printf(buf, "<");
3977 		for (int i = 0; i < 4; i++)
3978 			if ((1 << i) & abilities.phy_type_ext)
3979 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3980 		sbuf_printf(buf, ">");
3981 	}
3982 	sbuf_printf(buf, "\n");
3983 
3984 	sbuf_printf(buf,
3985 	    "Speed    : %02x\n"
3986 	    "Abilities: %02x\n"
3987 	    "EEE cap  : %04x\n"
3988 	    "EEER reg : %08x\n"
3989 	    "D3 Lpan  : %02x\n"
3990 	    "ID       : %02x %02x %02x %02x\n"
3991 	    "ModType  : %02x %02x %02x\n"
3992 	    "ModType E: %01x\n"
3993 	    "FEC Cfg  : %02x\n"
3994 	    "Ext CC   : %02x",
3995 	    abilities.link_speed,
3996 	    abilities.abilities, abilities.eee_capability,
3997 	    abilities.eeer_val, abilities.d3_lpan,
3998 	    abilities.phy_id[0], abilities.phy_id[1],
3999 	    abilities.phy_id[2], abilities.phy_id[3],
4000 	    abilities.module_type[0], abilities.module_type[1],
4001 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
4002 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
4003 	    abilities.ext_comp_code);
4004 
4005 	error = sbuf_finish(buf);
4006 	if (error)
4007 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4008 
4009 	sbuf_delete(buf);
4010 	return (error);
4011 }
4012 
4013 static int
4014 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4015 {
4016 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4017 	struct ixl_vsi *vsi = &pf->vsi;
4018 	struct ixl_mac_filter *f;
4019 	char *buf, *buf_i;
4020 
4021 	int error = 0;
4022 	int ftl_len = 0;
4023 	int ftl_counter = 0;
4024 	int buf_len = 0;
	int entry_len = 42;	/* strlen of one formatted entry, excluding '\n' */
4026 
4027 	SLIST_FOREACH(f, &vsi->ftl, next) {
4028 		ftl_len++;
4029 	}
4030 
4031 	if (ftl_len < 1) {
4032 		sysctl_handle_string(oidp, "(none)", 6, req);
4033 		return (0);
4034 	}
4035 
4036 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4037 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_WAITOK);
4038 
4039 	sprintf(buf_i++, "\n");
4040 	SLIST_FOREACH(f, &vsi->ftl, next) {
4041 		sprintf(buf_i,
4042 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4043 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4044 		buf_i += entry_len;
4045 		/* don't print '\n' for last entry */
4046 		if (++ftl_counter != ftl_len) {
4047 			sprintf(buf_i, "\n");
4048 			buf_i++;
4049 		}
4050 	}
4051 
4052 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4053 	if (error)
		device_printf(pf->dev, "sysctl error: %d\n", error);
4055 	free(buf, M_DEVBUF);
4056 	return error;
4057 }
4058 
4059 #define IXL_SW_RES_SIZE 0x14
4060 int
4061 ixl_res_alloc_cmp(const void *a, const void *b)
4062 {
4063 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4064 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4065 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4066 
4067 	return ((int)one->resource_type - (int)two->resource_type);
4068 }
4069 
4070 /*
4071  * Longest string length: 25
4072  */
4073 char *
4074 ixl_switch_res_type_string(u8 type)
4075 {
	/* TODO: This should be changed to static const */
	static char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
4078 		"VEB",
4079 		"VSI",
4080 		"Perfect Match MAC address",
4081 		"S-tag",
4082 		"(Reserved)",
4083 		"Multicast hash entry",
4084 		"Unicast hash entry",
4085 		"VLAN",
4086 		"VSI List entry",
4087 		"(Reserved)",
4088 		"VLAN Statistic Pool",
4089 		"Mirror Rule",
4090 		"Queue Set",
4091 		"Inner VLAN Forward filter",
4092 		"(Reserved)",
4093 		"Inner MAC",
4094 		"IP",
4095 		"GRE/VN1 Key",
4096 		"VN2 Key",
4097 		"Tunneling Port"
4098 	};
4099 
	if (type < IXL_SW_RES_SIZE)
4101 		return ixl_switch_res_type_strings[type];
4102 	else
4103 		return "(Reserved)";
4104 }
4105 
4106 static int
4107 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4108 {
4109 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4110 	struct i40e_hw *hw = &pf->hw;
4111 	device_t dev = pf->dev;
4112 	struct sbuf *buf;
4113 	enum i40e_status_code status;
4114 	int error = 0;
4115 
4116 	u8 num_entries;
4117 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4118 
4119 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4120 	if (!buf) {
4121 		device_printf(dev, "Could not allocate sbuf for output.\n");
4122 		return (ENOMEM);
4123 	}
4124 
4125 	bzero(resp, sizeof(resp));
4126 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4127 				resp,
4128 				IXL_SW_RES_SIZE,
4129 				NULL);
4130 	if (status) {
4131 		device_printf(dev,
4132 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4133 		    __func__, i40e_stat_str(hw, status),
4134 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4135 		sbuf_delete(buf);
		return (EIO);
4137 	}
4138 
4139 	/* Sort entries by type for display */
4140 	qsort(resp, num_entries,
4141 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4142 	    &ixl_res_alloc_cmp);
4143 
4144 	sbuf_cat(buf, "\n");
4145 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4146 	sbuf_printf(buf,
4147 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4148 	    "                          | (this)     | (all) | (this) | (all)       \n");
4149 	for (int i = 0; i < num_entries; i++) {
4150 		sbuf_printf(buf,
4151 		    "%25s | %10d   %5d   %6d   %12d",
4152 		    ixl_switch_res_type_string(resp[i].resource_type),
4153 		    resp[i].guaranteed,
4154 		    resp[i].total,
4155 		    resp[i].used,
4156 		    resp[i].total_unalloced);
4157 		if (i < num_entries - 1)
4158 			sbuf_cat(buf, "\n");
4159 	}
4160 
4161 	error = sbuf_finish(buf);
4162 	if (error)
4163 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4164 
4165 	sbuf_delete(buf);
4166 	return (error);
4167 }
4168 
4169 /*
4170 ** Caller must init and delete sbuf; this function will clear and
4171 ** finish it for caller.
4172 */
4173 char *
4174 ixl_switch_element_string(struct sbuf *s,
4175     struct i40e_aqc_switch_config_element_resp *element)
4176 {
4177 	sbuf_clear(s);
4178 
4179 	switch (element->element_type) {
4180 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4181 		sbuf_printf(s, "MAC %3d", element->element_info);
4182 		break;
4183 	case I40E_AQ_SW_ELEM_TYPE_PF:
4184 		sbuf_printf(s, "PF  %3d", element->element_info);
4185 		break;
4186 	case I40E_AQ_SW_ELEM_TYPE_VF:
4187 		sbuf_printf(s, "VF  %3d", element->element_info);
4188 		break;
4189 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4190 		sbuf_cat(s, "EMP");
4191 		break;
4192 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4193 		sbuf_cat(s, "BMC");
4194 		break;
4195 	case I40E_AQ_SW_ELEM_TYPE_PV:
4196 		sbuf_cat(s, "PV");
4197 		break;
4198 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4199 		sbuf_cat(s, "VEB");
4200 		break;
4201 	case I40E_AQ_SW_ELEM_TYPE_PA:
4202 		sbuf_cat(s, "PA");
4203 		break;
4204 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4205 		sbuf_printf(s, "VSI %3d", element->element_info);
4206 		break;
4207 	default:
4208 		sbuf_cat(s, "?");
4209 		break;
4210 	}
4211 
4212 	sbuf_finish(s);
4213 	return sbuf_data(s);
4214 }
4215 
4216 static int
4217 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4218 {
4219 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4220 	struct i40e_hw *hw = &pf->hw;
4221 	device_t dev = pf->dev;
4222 	struct sbuf *buf;
4223 	struct sbuf *nmbuf;
4224 	enum i40e_status_code status;
4225 	int error = 0;
4226 	u16 next = 0;
4227 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4228 
4229 	struct i40e_aqc_get_switch_config_resp *sw_config;
4230 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4231 
4232 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4233 	if (!buf) {
4234 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4235 		return (ENOMEM);
4236 	}
4237 
4238 	status = i40e_aq_get_switch_config(hw, sw_config,
4239 	    sizeof(aq_buf), &next, NULL);
4240 	if (status) {
4241 		device_printf(dev,
4242 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4243 		    __func__, i40e_stat_str(hw, status),
4244 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4245 		sbuf_delete(buf);
		return (EIO);
4247 	}
4248 	if (next)
4249 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4250 		    __func__, next);
4251 
4252 	nmbuf = sbuf_new_auto();
4253 	if (!nmbuf) {
4254 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4255 		sbuf_delete(buf);
4256 		return (ENOMEM);
4257 	}
4258 
4259 	sbuf_cat(buf, "\n");
4260 	/* Assuming <= 255 elements in switch */
4261 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4262 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4263 	/* Exclude:
4264 	** Revision -- all elements are revision 1 for now
4265 	*/
4266 	sbuf_printf(buf,
4267 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4268 	    "                |          |          | (uplink)\n");
4269 	for (int i = 0; i < sw_config->header.num_reported; i++) {
		/* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
4271 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4272 		sbuf_cat(buf, " ");
4273 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4274 		    &sw_config->element[i]));
4275 		sbuf_cat(buf, " | ");
4276 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4277 		sbuf_cat(buf, "   ");
4278 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4279 		sbuf_cat(buf, "   ");
4280 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4281 		if (i < sw_config->header.num_reported - 1)
4282 			sbuf_cat(buf, "\n");
4283 	}
4284 	sbuf_delete(nmbuf);
4285 
4286 	error = sbuf_finish(buf);
4287 	if (error)
4288 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4289 
4290 	sbuf_delete(buf);
4291 
4292 	return (error);
4293 }
4294 
4295 static int
4296 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4297 {
4298 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4299 	struct i40e_hw *hw = &pf->hw;
4300 	device_t dev = pf->dev;
4301 	struct sbuf *buf;
4302 	int error = 0;
4303 	enum i40e_status_code status;
4304 	u32 reg;
4305 
4306 	struct i40e_aqc_get_set_rss_key_data key_data;
4307 
4308 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4309 	if (!buf) {
4310 		device_printf(dev, "Could not allocate sbuf for output.\n");
4311 		return (ENOMEM);
4312 	}
4313 
4314 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4315 
4316 	sbuf_cat(buf, "\n");
4317 	if (hw->mac.type == I40E_MAC_X722) {
4318 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4319 		if (status)
4320 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4321 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4322 	} else {
4323 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4324 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4325 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4326 		}
4327 	}
4328 
4329 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4330 
4331 	error = sbuf_finish(buf);
4332 	if (error)
4333 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4334 	sbuf_delete(buf);
4335 
4336 	return (error);
4337 }
4338 
4339 static void
4340 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4341 {
4342 	int i, j, k, width;
4343 	char c;
4344 
4345 	if (length < 1 || buf == NULL) return;
4346 
4347 	int byte_stride = 16;
4348 	int lines = length / byte_stride;
4349 	int rem = length % byte_stride;
4350 	if (rem > 0)
4351 		lines++;
4352 
4353 	for (i = 0; i < lines; i++) {
4354 		width = (rem > 0 && i == lines - 1)
4355 		    ? rem : byte_stride;
4356 
4357 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4358 
4359 		for (j = 0; j < width; j++)
4360 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4361 
4362 		if (width < byte_stride) {
4363 			for (k = 0; k < (byte_stride - width); k++)
4364 				sbuf_printf(sb, "   ");
4365 		}
4366 
4367 		if (!text) {
4368 			sbuf_printf(sb, "\n");
4369 			continue;
4370 		}
4371 
4372 		for (j = 0; j < width; j++) {
4373 			c = (char)buf[i * byte_stride + j];
4374 			if (c < 32 || c > 126)
4375 				sbuf_printf(sb, ".");
4376 			else
4377 				sbuf_printf(sb, "%c", c);
4378 
4379 			if (j == width - 1)
4380 				sbuf_printf(sb, "\n");
4381 		}
4382 	}
4383 }
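
/*
 * Example output (illustrative) for a 20-byte buffer containing
 * "hello ixl" followed by small binary values, with label_offset 0 and
 * text enabled:
 *
 *	   0 | 68 65 6c 6c 6f 20 69 78 6c 01 02 03 04 05 06 07 hello ixl.......
 *	  16 | 08 09 0a 0b                                     ....
 */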
4384 
4385 static int
4386 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4387 {
4388 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4389 	struct i40e_hw *hw = &pf->hw;
4390 	device_t dev = pf->dev;
4391 	struct sbuf *buf;
4392 	int error = 0;
4393 	enum i40e_status_code status;
4394 	u8 hlut[512];
4395 	u32 reg;
4396 
4397 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4398 	if (!buf) {
4399 		device_printf(dev, "Could not allocate sbuf for output.\n");
4400 		return (ENOMEM);
4401 	}
4402 
4403 	bzero(hlut, sizeof(hlut));
4404 	sbuf_cat(buf, "\n");
4405 	if (hw->mac.type == I40E_MAC_X722) {
4406 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4407 		if (status)
4408 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4409 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4410 	} else {
4411 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4412 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4413 			bcopy(&reg, &hlut[i << 2], 4);
4414 		}
4415 	}
	ixl_sbuf_print_bytes(buf, hlut, sizeof(hlut), 0, false);
4417 
4418 	error = sbuf_finish(buf);
4419 	if (error)
4420 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4421 	sbuf_delete(buf);
4422 
4423 	return (error);
4424 }
4425 
4426 static int
4427 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4428 {
4429 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4430 	struct i40e_hw *hw = &pf->hw;
4431 	u64 hena;
4432 
4433 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4434 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4435 
4436 	return sysctl_handle_long(oidp, NULL, hena, req);
4437 }
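
/*
 * HENA is a 64-bit bitmap assembled from two 32-bit registers; bit N
 * enables RSS hashing for packet type N.  Example (illustrative sketch,
 * assuming the usual shared-code I40E_FILTER_PCTYPE_* enum and BIT_ULL
 * macro): testing whether non-fragmented TCP/IPv4 hashing is enabled:
 *
 *	if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
 *		...;
 */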
4438 
4439 /*
4440  * Sysctl to disable firmware's link management
4441  *
4442  * 1 - Disable link management on this port
4443  * 0 - Re-enable link management
4444  *
4445  * On normal NVMs, firmware manages link by default.
4446  */
4447 static int
4448 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4449 {
4450 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4451 	struct i40e_hw *hw = &pf->hw;
4452 	device_t dev = pf->dev;
4453 	int requested_mode = -1;
4454 	enum i40e_status_code status = 0;
4455 	int error = 0;
4456 
4457 	/* Read in new mode */
4458 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4459 	if ((error) || (req->newptr == NULL))
4460 		return (error);
4461 	/* Check for sane value */
4462 	if (requested_mode < 0 || requested_mode > 1) {
4463 		device_printf(dev, "Valid modes are 0 or 1\n");
4464 		return (EINVAL);
4465 	}
4466 
	/* Set new mode; bit 4 of the PHY debug argument disables FW link management */
	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4469 	if (status) {
4470 		device_printf(dev,
4471 		    "%s: Error setting new phy debug mode %s,"
4472 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4473 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4474 		return (EIO);
4475 	}
4476 
4477 	return (0);
4478 }
4479 
4480 /*
4481  * Read some diagnostic data from an SFP module
4482  * Bytes 96-99, 102-105 from device address 0xA2
4483  */
4484 static int
4485 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4486 {
4487 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4488 	device_t dev = pf->dev;
4489 	struct sbuf *sbuf;
4490 	int error = 0;
4491 	u8 output;
4492 
4493 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4494 	if (error) {
4495 		device_printf(dev, "Error reading from i2c\n");
4496 		return (error);
4497 	}
4498 	if (output != 0x3) {
4499 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4500 		return (EIO);
4501 	}
4502 
4503 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4504 	if (!(output & 0x60)) {
4505 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4506 		return (EIO);
4507 	}
4508 
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sbuf == NULL) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
4510 
4511 	for (u8 offset = 96; offset < 100; offset++) {
4512 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4513 		sbuf_printf(sbuf, "%02X ", output);
4514 	}
4515 	for (u8 offset = 102; offset < 106; offset++) {
4516 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4517 		sbuf_printf(sbuf, "%02X ", output);
4518 	}
4519 
4520 	sbuf_finish(sbuf);
4521 	sbuf_delete(sbuf);
4522 
4523 	return (0);
4524 }
4525 
4526 /*
4527  * Sysctl to read a byte from I2C bus.
4528  *
4529  * Input: 32-bit value:
4530  * 	bits 0-7:   device address (0xA0 or 0xA2)
4531  * 	bits 8-15:  offset (0-255)
4532  *	bits 16-31: unused
4533  * Output: 8-bit value read
4534  */
4535 static int
4536 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4537 {
4538 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4539 	device_t dev = pf->dev;
4540 	int input = -1, error = 0;
4541 	u8 dev_addr, offset, output;
4542 
4543 	/* Read in I2C read parameters */
4544 	error = sysctl_handle_int(oidp, &input, 0, req);
4545 	if ((error) || (req->newptr == NULL))
4546 		return (error);
4547 	/* Validate device address */
4548 	dev_addr = input & 0xFF;
4549 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4550 		return (EINVAL);
4551 	}
4552 	offset = (input >> 8) & 0xFF;
4553 
4554 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4555 	if (error)
4556 		return (error);
4557 
4558 	device_printf(dev, "%02X\n", output);
4559 	return (0);
4560 }
4561 
4562 /*
4563  * Sysctl to write a byte to the I2C bus.
4564  *
4565  * Input: 32-bit value:
4566  * 	bits 0-7:   device address (0xA0 or 0xA2)
4567  * 	bits 8-15:  offset (0-255)
4568  *	bits 16-23: value to write
4569  *	bits 24-31: unused
4570  * Output: 8-bit value written
4571  */
4572 static int
4573 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4574 {
4575 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4576 	device_t dev = pf->dev;
4577 	int input = -1, error = 0;
4578 	u8 dev_addr, offset, value;
4579 
4580 	/* Read in I2C write parameters */
4581 	error = sysctl_handle_int(oidp, &input, 0, req);
4582 	if ((error) || (req->newptr == NULL))
4583 		return (error);
4584 	/* Validate device address */
4585 	dev_addr = input & 0xFF;
4586 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4587 		return (EINVAL);
4588 	}
4589 	offset = (input >> 8) & 0xFF;
4590 	value = (input >> 16) & 0xFF;
4591 
4592 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4593 	if (error)
4594 		return (error);
4595 
4596 	device_printf(dev, "%02X written\n", value);
4597 	return (0);
4598 }
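
/*
 * Example (illustrative): composing the 32-bit inputs described above.
 * Reading offset 96 from the diagnostic page at device address 0xA2:
 *
 *	int input = (96 << 8) | 0xA2;
 *
 * Writing the value 0x55 to the same location:
 *
 *	int input = (0x55 << 16) | (96 << 8) | 0xA2;
 */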
4599 
4600 static int
4601 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4602     u8 bit_pos, int *is_set)
4603 {
4604 	device_t dev = pf->dev;
4605 	struct i40e_hw *hw = &pf->hw;
4606 	enum i40e_status_code status;
4607 
4608 	status = i40e_aq_get_phy_capabilities(hw,
4609 	    FALSE, FALSE, abilities, NULL);
4610 	if (status) {
4611 		device_printf(dev,
4612 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4613 		    __func__, i40e_stat_str(hw, status),
4614 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4615 		return (EIO);
4616 	}
4617 
4618 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4619 	return (0);
4620 }
4621 
4622 static int
4623 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4624     u8 bit_pos, int set)
4625 {
4626 	device_t dev = pf->dev;
4627 	struct i40e_hw *hw = &pf->hw;
4628 	struct i40e_aq_set_phy_config config;
4629 	enum i40e_status_code status;
4630 
4631 	/* Set new PHY config */
4632 	memset(&config, 0, sizeof(config));
4633 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4634 	if (set)
4635 		config.fec_config |= bit_pos;
4636 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4637 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4638 		config.phy_type = abilities->phy_type;
4639 		config.phy_type_ext = abilities->phy_type_ext;
4640 		config.link_speed = abilities->link_speed;
4641 		config.eee_capability = abilities->eee_capability;
4642 		config.eeer = abilities->eeer_val;
4643 		config.low_power_ctrl = abilities->d3_lpan;
4644 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4645 
4646 		if (status) {
4647 			device_printf(dev,
4648 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4649 			    __func__, i40e_stat_str(hw, status),
4650 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4651 			return (EIO);
4652 		}
4653 	}
4654 
4655 	return (0);
4656 }
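
/*
 * The FEC sysctl handlers below all follow the same pattern: read the
 * current bit with ixl_get_fec_config(), let sysctl(9) overwrite the
 * integer, then write it back with ixl_set_fec_config(), which only
 * issues a Set PHY Config AQ command if the FEC byte actually changed.
 */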
4657 
4658 static int
4659 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4660 {
4661 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4662 	int mode, error = 0;
4663 
4664 	struct i40e_aq_get_phy_abilities_resp abilities;
4665 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4666 	if (error)
4667 		return (error);
4668 	/* Read in new mode */
4669 	error = sysctl_handle_int(oidp, &mode, 0, req);
4670 	if ((error) || (req->newptr == NULL))
4671 		return (error);
4672 
4673 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4674 }
4675 
4676 static int
4677 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4678 {
4679 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4680 	int mode, error = 0;
4681 
4682 	struct i40e_aq_get_phy_abilities_resp abilities;
4683 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4684 	if (error)
4685 		return (error);
4686 	/* Read in new mode */
4687 	error = sysctl_handle_int(oidp, &mode, 0, req);
4688 	if ((error) || (req->newptr == NULL))
4689 		return (error);
4690 
4691 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4692 }
4693 
4694 static int
4695 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4696 {
4697 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4698 	int mode, error = 0;
4699 
4700 	struct i40e_aq_get_phy_abilities_resp abilities;
4701 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4702 	if (error)
4703 		return (error);
4704 	/* Read in new mode */
4705 	error = sysctl_handle_int(oidp, &mode, 0, req);
4706 	if ((error) || (req->newptr == NULL))
4707 		return (error);
4708 
4709 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4710 }
4711 
4712 static int
4713 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4714 {
4715 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4716 	int mode, error = 0;
4717 
4718 	struct i40e_aq_get_phy_abilities_resp abilities;
4719 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4720 	if (error)
4721 		return (error);
4722 	/* Read in new mode */
4723 	error = sysctl_handle_int(oidp, &mode, 0, req);
4724 	if ((error) || (req->newptr == NULL))
4725 		return (error);
4726 
4727 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4728 }
4729 
4730 static int
4731 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4732 {
4733 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4734 	int mode, error = 0;
4735 
4736 	struct i40e_aq_get_phy_abilities_resp abilities;
4737 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4738 	if (error)
4739 		return (error);
4740 	/* Read in new mode */
4741 	error = sysctl_handle_int(oidp, &mode, 0, req);
4742 	if ((error) || (req->newptr == NULL))
4743 		return (error);
4744 
4745 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4746 }
4747 
4748 static int
4749 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4750 {
4751 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4752 	struct i40e_hw *hw = &pf->hw;
4753 	device_t dev = pf->dev;
4754 	struct sbuf *buf;
4755 	int error = 0;
4756 	enum i40e_status_code status;
4757 
4758 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4759 	if (!buf) {
4760 		device_printf(dev, "Could not allocate sbuf for output.\n");
4761 		return (ENOMEM);
4762 	}
4763 
4764 	u8 *final_buff;
4765 	/* This amount is only necessary if reading the entire cluster into memory */
4766 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	/* malloc(9) with M_WAITOK sleeps until memory is available; it cannot fail */
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4772 	int final_buff_len = 0;
4773 
4774 	u8 cluster_id = 1;
4775 	bool more = true;
4776 
4777 	u8 dump_buf[4096];
4778 	u16 curr_buff_size = 4096;
4779 	u8 curr_next_table = 0;
4780 	u32 curr_next_index = 0;
4781 
4782 	u16 ret_buff_size;
4783 	u8 ret_next_table;
4784 	u32 ret_next_index;
4785 
4786 	sbuf_cat(buf, "\n");
4787 
4788 	while (more) {
4789 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4790 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4791 		if (status) {
4792 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4793 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4794 			goto free_out;
4795 		}
4796 
		/* Copy this chunk out of the temp buffer, guarding the fixed-size output */
		if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE) {
			device_printf(dev, "Debug dump is larger than the %d byte buffer\n",
			    IXL_FINAL_BUFF_SIZE);
			goto free_out;
		}
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;
4800 
4801 		if (ret_next_table != curr_next_table) {
4802 			/* We're done with the current table; we can dump out read data. */
4803 			sbuf_printf(buf, "%d:", curr_next_table);
4804 			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");
4810 
4811 			/* The entire cluster has been read; we're finished */
4812 			if (ret_next_table == 0xFF)
4813 				break;
4814 
4815 			/* Otherwise clear the output buffer and continue reading */
4816 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4817 			final_buff_len = 0;
4818 		}
4819 
4820 		if (ret_next_index == 0xFFFFFFFF)
4821 			ret_next_index = 0;
4822 
4823 		bzero(dump_buf, sizeof(dump_buf));
4824 		curr_next_table = ret_next_table;
4825 		curr_next_index = ret_next_index;
4826 	}
4827 
4828 free_out:
4829 	free(final_buff, M_DEVBUF);
4831 	error = sbuf_finish(buf);
4832 	if (error)
4833 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4834 	sbuf_delete(buf);
4835 
4836 	return (error);
4837 }
4838 
4839 static int
4840 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4841 {
4842 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4843 	struct i40e_hw *hw = &pf->hw;
4844 	device_t dev = pf->dev;
4845 	int error = 0;
4846 	int state, new_state;
4847 	enum i40e_status_code status;
4848 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4849 
4850 	/* Read in new mode */
4851 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4852 	if ((error) || (req->newptr == NULL))
4853 		return (error);
4854 
4855 	/* Already in requested state */
4856 	if (new_state == state)
4857 		return (error);
4858 
4859 	if (new_state == 0) {
4860 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4861 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4862 			return (EINVAL);
4863 		}
4864 
4865 		if (pf->hw.aq.api_maj_ver < 1 ||
4866 		    (pf->hw.aq.api_maj_ver == 1 &&
4867 		    pf->hw.aq.api_min_ver < 7)) {
4868 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4869 			return (EINVAL);
4870 		}
4871 
4872 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4873 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4874 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4875 	} else {
4876 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4877 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4878 			device_printf(dev, "FW LLDP agent is already running\n");
4879 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4880 	}
4881 
4882 	return (0);
4883 }
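
/*
 * Example (illustrative; assumes the OID is attached as
 * dev.ixl.<unit>.fw_lldp):
 *
 *	# sysctl dev.ixl.0.fw_lldp=0	(stop the FW LLDP agent)
 *	# sysctl dev.ixl.0.fw_lldp=1	(restart it)
 */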
4884 
4885 /*
4886  * Get FW LLDP Agent status
4887  */
4888 int
4889 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4890 {
4891 	enum i40e_status_code ret = I40E_SUCCESS;
4892 	struct i40e_lldp_variables lldp_cfg;
4893 	struct i40e_hw *hw = &pf->hw;
4894 	u8 adminstatus = 0;
4895 
4896 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4897 	if (ret)
4898 		return ret;
4899 
4900 	/* Get the LLDP AdminStatus for the current port */
4901 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4902 	adminstatus &= 0xf;
4903 
4904 	/* Check if LLDP agent is disabled */
4905 	if (!adminstatus) {
4906 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4907 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4908 	} else
4909 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4910 
4911 	return (0);
4912 }
4913 
4914 int
4915 ixl_attach_get_link_status(struct ixl_pf *pf)
4916 {
4917 	struct i40e_hw *hw = &pf->hw;
4918 	device_t dev = pf->dev;
4919 	int error = 0;
4920 
	/* FW older than 4.33 needs an explicit link restart at attach time */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4922 	    (hw->aq.fw_maj_ver < 4)) {
4923 		i40e_msec_delay(75);
4924 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4925 		if (error) {
4926 			device_printf(dev, "link restart failed, aq_err=%d\n",
4927 			    pf->hw.aq.asq_last_status);
4928 			return error;
4929 		}
4930 	}
4931 
4932 	/* Determine link state */
4933 	hw->phy.get_link_info = TRUE;
4934 	i40e_get_link_status(hw, &pf->link_up);
4935 	return (0);
4936 }
4937 
4938 static int
4939 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4940 {
4941 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4942 	int requested = 0, error = 0;
4943 
4944 	/* Read in new mode */
4945 	error = sysctl_handle_int(oidp, &requested, 0, req);
4946 	if ((error) || (req->newptr == NULL))
4947 		return (error);
4948 
4949 	/* Initiate the PF reset later in the admin task */
4950 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4951 
4952 	return (error);
4953 }
4954 
4955 static int
4956 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4957 {
4958 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4959 	struct i40e_hw *hw = &pf->hw;
4960 	int requested = 0, error = 0;
4961 
4962 	/* Read in new mode */
4963 	error = sysctl_handle_int(oidp, &requested, 0, req);
4964 	if ((error) || (req->newptr == NULL))
4965 		return (error);
4966 
4967 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4968 
4969 	return (error);
4970 }
4971 
4972 static int
4973 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4974 {
4975 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4976 	struct i40e_hw *hw = &pf->hw;
4977 	int requested = 0, error = 0;
4978 
4979 	/* Read in new mode */
4980 	error = sysctl_handle_int(oidp, &requested, 0, req);
4981 	if ((error) || (req->newptr == NULL))
4982 		return (error);
4983 
4984 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4985 
4986 	return (error);
4987 }
4988 
4989 static int
4990 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
4991 {
4992 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4993 	struct i40e_hw *hw = &pf->hw;
4994 	int requested = 0, error = 0;
4995 
4996 	/* Read in new mode */
4997 	error = sysctl_handle_int(oidp, &requested, 0, req);
4998 	if ((error) || (req->newptr == NULL))
4999 		return (error);
5000 
5001 	/* TODO: Find out how to bypass this */
5002 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5003 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5004 		error = EINVAL;
5005 	} else
5006 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5007 
5008 	return (error);
5009 }
5010 
5011 /*
5012  * Print out mapping of TX queue indexes and Rx queue indexes
5013  * to MSI-X vectors.
5014  */
5015 static int
5016 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5017 {
5018 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5019 	struct ixl_vsi *vsi = &pf->vsi;
5020 	device_t dev = pf->dev;
5021 	struct sbuf *buf;
5022 	int error = 0;
5023 
	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;
5026 
5027 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5028 	if (!buf) {
5029 		device_printf(dev, "Could not allocate sbuf for output.\n");
5030 		return (ENOMEM);
5031 	}
5032 
5033 	sbuf_cat(buf, "\n");
5034 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5035 		rx_que = &vsi->rx_queues[i];
5036 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5037 	}
5038 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5039 		tx_que = &vsi->tx_queues[i];
5040 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5041 	}
5042 
5043 	error = sbuf_finish(buf);
5044 	if (error)
5045 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5046 	sbuf_delete(buf);
5047 
5048 	return (error);
5049 }
5050