1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 
50 /* Sysctls */
51 static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
59 
60 /* Debug Sysctls */
61 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
85 #ifdef IXL_DEBUG
86 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
88 #endif
89 
90 #ifdef IXL_IW
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
93 #endif
94 
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96     {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
97 
98 const char * const ixl_fc_string[6] = {
99 	"None",
100 	"Rx",
101 	"Tx",
102 	"Full",
103 	"Priority",
104 	"Default"
105 };
106 
107 static char *ixl_fec_string[3] = {
108        "CL108 RS-FEC",
109        "CL74 FC-FEC/BASE-R",
110        "None"
111 };
112 
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
114 
115 /*
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
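** For example, the resulting string looks like the following
** (values illustrative, not from any specific device):
**   "fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.262.0"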
117 */
118 void
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
120 {
121 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
124 
125 	sbuf_printf(buf,
126 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
129 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 	    IXL_NVM_VERSION_HI_SHIFT,
131 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 	    IXL_NVM_VERSION_LO_SHIFT,
133 	    hw->nvm.eetrack,
134 	    oem_ver, oem_build, oem_patch);
135 }
136 
137 void
138 ixl_print_nvm_version(struct ixl_pf *pf)
139 {
140 	struct i40e_hw *hw = &pf->hw;
141 	device_t dev = pf->dev;
142 	struct sbuf *sbuf;
143 
144 	sbuf = sbuf_new_auto();
145 	ixl_nvm_version_str(hw, sbuf);
146 	sbuf_finish(sbuf);
147 	device_printf(dev, "%s\n", sbuf_data(sbuf));
148 	sbuf_delete(sbuf);
149 }
150 
151 static void
152 ixl_configure_tx_itr(struct ixl_pf *pf)
153 {
154 	struct i40e_hw		*hw = &pf->hw;
155 	struct ixl_vsi		*vsi = &pf->vsi;
156 	struct ixl_tx_queue	*que = vsi->tx_queues;
157 
158 	vsi->tx_itr_setting = pf->tx_itr;
159 
160 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 		struct tx_ring	*txr = &que->txr;
162 
163 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 		    vsi->tx_itr_setting);
165 		txr->itr = vsi->tx_itr_setting;
166 		txr->latency = IXL_AVE_LATENCY;
167 	}
168 }
169 
170 static void
171 ixl_configure_rx_itr(struct ixl_pf *pf)
172 {
173 	struct i40e_hw		*hw = &pf->hw;
174 	struct ixl_vsi		*vsi = &pf->vsi;
175 	struct ixl_rx_queue	*que = vsi->rx_queues;
176 
177 	vsi->rx_itr_setting = pf->rx_itr;
178 
179 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 		struct rx_ring 	*rxr = &que->rxr;
181 
182 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 		    vsi->rx_itr_setting);
184 		rxr->itr = vsi->rx_itr_setting;
185 		rxr->latency = IXL_AVE_LATENCY;
186 	}
187 }
188 
189 /*
190  * Write PF ITR values to queue ITR registers.
191  */
192 void
193 ixl_configure_itr(struct ixl_pf *pf)
194 {
195 	ixl_configure_tx_itr(pf);
196 	ixl_configure_rx_itr(pf);
197 }
198 
199 /*********************************************************************
200  *
201  *  Get the hardware capabilities
202  *
203  **********************************************************************/
204 
205 int
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
207 {
208 	struct i40e_aqc_list_capabilities_element_resp *buf;
209 	struct i40e_hw	*hw = &pf->hw;
210 	device_t 	dev = pf->dev;
211 	enum i40e_status_code status;
212 	int len, i2c_intfc_num;
213 	bool again = TRUE;
214 	u16 needed;
215 
216 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
217 retry:
218 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 		device_printf(dev, "Unable to allocate cap memory\n");
221 		return (ENOMEM);
222 	}
223 
224 	/* This populates the hw struct */
225 	status = i40e_aq_discover_capabilities(hw, buf, len,
226 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
227 	free(buf, M_DEVBUF);
228 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
229 	    (again == TRUE)) {
230 		/* retry once with a larger buffer */
231 		again = FALSE;
232 		len = needed;
233 		goto retry;
234 	} else if (status != I40E_SUCCESS) {
235 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
237 		return (ENODEV);
238 	}
239 
240 	/*
241 	 * Some devices have both MDIO and I2C; since this isn't reported
242 	 * by the FW, check registers to see if an I2C interface exists.
243 	 */
244 	i2c_intfc_num = ixl_find_i2c_interface(pf);
245 	if (i2c_intfc_num != -1)
246 		pf->has_i2c = true;
247 
248 	/* Determine functions to use for driver I2C accesses */
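	/*
	 * The tunable maps as follows (see the cases below):
	 * 0 = best available (AQ on XL710 with API 1.7+, else register I/O),
	 * 1 = bit-bang, 2 = register I/O, 3 = admin queue command.
	 */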
249 	switch (pf->i2c_access_method) {
250 	case 0: {
251 		if (hw->mac.type == I40E_MAC_XL710 &&
252 		    hw->aq.api_maj_ver == 1 &&
253 		    hw->aq.api_min_ver >= 7) {
254 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
256 		} else {
257 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
259 		}
260 		break;
261 	}
262 	case 3:
263 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
265 		break;
266 	case 2:
267 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
269 		break;
270 	case 1:
271 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
273 		break;
274 	default:
275 		/* Should not happen */
276 		device_printf(dev, "Error setting I2C access functions\n");
277 		break;
278 	}
279 
280 	/* Print a subset of the capability information. */
281 	device_printf(dev,
282 	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
288 	    "MDIO shared");
289 
290 	return (0);
291 }
292 
293 /* For the set_advertise sysctl */
294 void
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
296 {
297 	device_t dev = pf->dev;
298 	int err;
299 
300 	/* Make sure to initialize the device to the complete list of
301 	 * supported speeds on driver load, to ensure unloading and
302 	 * reloading the driver will restore this value.
303 	 */
304 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
305 	if (err) {
306 		/* Non-fatal error */
307 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
308 			      __func__, err);
309 		return;
310 	}
311 
312 	pf->advertised_speed =
313 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
314 }
315 
316 int
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
318 {
319 	enum i40e_status_code status = 0;
320 	struct i40e_hw *hw = &pf->hw;
321 	device_t dev = pf->dev;
322 
323 	/* Shutdown LAN HMC */
324 	if (hw->hmc.hmc_obj) {
325 		status = i40e_shutdown_lan_hmc(hw);
326 		if (status) {
327 			device_printf(dev,
328 			    "init: LAN HMC shutdown failure; status %s\n",
329 			    i40e_stat_str(hw, status));
330 			goto err_out;
331 		}
332 	}
333 
334 	/* Shutdown admin queue */
335 	ixl_disable_intr0(hw);
336 	status = i40e_shutdown_adminq(hw);
337 	if (status)
338 		device_printf(dev,
339 		    "init: Admin Queue shutdown failure; status %s\n",
340 		    i40e_stat_str(hw, status));
341 
342 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
343 err_out:
344 	return (status);
345 }
346 
347 int
348 ixl_reset(struct ixl_pf *pf)
349 {
350 	struct i40e_hw *hw = &pf->hw;
351 	device_t dev = pf->dev;
352 	u32 reg;
353 	int error = 0;
354 
355 	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
356 	i40e_clear_hw(hw);
357 	error = i40e_pf_reset(hw);
358 	if (error) {
359 		device_printf(dev, "init: PF reset failure\n");
360 		error = EIO;
361 		goto err_out;
362 	}
363 
364 	error = i40e_init_adminq(hw);
365 	if (error) {
366 		device_printf(dev, "init: Admin queue init failure;"
367 		    " status code %d\n", error);
368 		error = EIO;
369 		goto err_out;
370 	}
371 
372 	i40e_clear_pxe_mode(hw);
373 
374 #if 0
375 	error = ixl_get_hw_capabilities(pf);
376 	if (error) {
377 		device_printf(dev, "init: Error retrieving HW capabilities;"
378 		    " status code %d\n", error);
379 		goto err_out;
380 	}
381 
382 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 	    hw->func_caps.num_rx_qp, 0, 0);
384 	if (error) {
385 		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
386 		    error);
387 		error = EIO;
388 		goto err_out;
389 	}
390 
391 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
392 	if (error) {
393 		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
394 		    error);
395 		error = EIO;
396 		goto err_out;
397 	}
398 
399 	// XXX: possible fix for panic, but our failure recovery is still broken
400 	error = ixl_switch_config(pf);
401 	if (error) {
402 		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
403 		     error);
404 		goto err_out;
405 	}
406 
407 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
408 	    NULL);
409 	if (error) {
410 		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 		    " aq_err %d\n", error, hw->aq.asq_last_status);
412 		error = EIO;
413 		goto err_out;
414 	}
415 
416 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
417 	if (error) {
418 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
420 		goto err_out;
421 	}
422 
423 	// XXX: (Rebuild VSIs?)
424 
425 	/* Firmware delay workaround */
426 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 	    (hw->aq.fw_maj_ver < 4)) {
428 		i40e_msec_delay(75);
429 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
430 		if (error) {
431 			device_printf(dev, "init: link restart failed, aq_err %d\n",
432 			    hw->aq.asq_last_status);
433 			goto err_out;
434 		}
435 	}
436 
437 
438 	/* Re-enable admin queue interrupt */
439 	if (pf->msix > 1) {
440 		ixl_configure_intr0_msix(pf);
441 		ixl_enable_intr0(hw);
442 	}
443 
444 err_out:
445 	return (error);
446 #endif
447 	ixl_rebuild_hw_structs_after_reset(pf);
448 
449 	/* The PF reset should have cleared any critical errors */
450 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
452 
453 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 	reg |= IXL_ICR0_CRIT_ERR_MASK;
455 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
456 
457 err_out:
458 	return (error);
459 }
460 
461 /*
462  * TODO: Make sure this properly handles admin queue / single rx queue intr
463  */
464 int
465 ixl_intr(void *arg)
466 {
467 	struct ixl_pf		*pf = arg;
468 	struct i40e_hw		*hw =  &pf->hw;
469 	struct ixl_vsi		*vsi = &pf->vsi;
470 	struct ixl_rx_queue	*que = vsi->rx_queues;
471 	u32			icr0;
472 
473 	// pf->admin_irq++
474 	++que->irqs;
475 
476 // TODO: Check against proper field
477 #if 0
478 	/* Clear PBA at start of ISR if using legacy interrupts */
479 	if (pf->msix == 0)
480 		wr32(hw, I40E_PFINT_DYN_CTL0,
481 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
483 #endif
484 
485 	icr0 = rd32(hw, I40E_PFINT_ICR0);
486 
487 
488 #ifdef PCI_IOV
489 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 		iflib_iov_intr_deferred(vsi->ctx);
491 #endif
492 
493 	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 		iflib_admin_intr_deferred(vsi->ctx);
496 
497 	// TODO: Is intr0 enabled somewhere else?
498 	ixl_enable_intr0(hw);
499 
500 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 		return (FILTER_SCHEDULE_THREAD);
502 	else
503 		return (FILTER_HANDLED);
504 }
505 
506 
507 /*********************************************************************
508  *
509  *  MSI-X VSI Interrupt Service routine
510  *
511  **********************************************************************/
512 int
513 ixl_msix_que(void *arg)
514 {
515 	struct ixl_rx_queue *rx_que = arg;
516 
517 	++rx_que->irqs;
518 
519 	ixl_set_queue_rx_itr(rx_que);
520 	// ixl_set_queue_tx_itr(que);
521 
522 	return (FILTER_SCHEDULE_THREAD);
523 }
524 
525 
526 /*********************************************************************
527  *
528  *  MSI-X Admin Queue Interrupt Service routine
529  *
530  **********************************************************************/
531 int
532 ixl_msix_adminq(void *arg)
533 {
534 	struct ixl_pf	*pf = arg;
535 	struct i40e_hw	*hw = &pf->hw;
536 	device_t	dev = pf->dev;
537 	u32		reg, mask, rstat_reg;
538 	bool		do_task = FALSE;
539 
540 	DDPRINTF(dev, "begin");
541 
542 	++pf->admin_irq;
543 
544 	reg = rd32(hw, I40E_PFINT_ICR0);
545 	/*
546 	 * For masking off interrupt causes that need to be handled before
547 	 * they can be re-enabled
548 	 */
549 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
550 
551 	/* Check on the cause */
552 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
554 		do_task = TRUE;
555 	}
556 
557 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
560 		do_task = TRUE;
561 	}
562 
563 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 		device_printf(dev, "Reset Requested!\n");
566 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 		device_printf(dev, "Reset type: ");
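		/* CORER = core reset, GLOBR = global reset,
		 * EMPR = EMP (firmware) reset, POR = power-on reset */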
570 		switch (rstat_reg) {
571 		/* These others might be handled similarly to an EMPR reset */
572 		case I40E_RESET_CORER:
573 			printf("CORER\n");
574 			break;
575 		case I40E_RESET_GLOBR:
576 			printf("GLOBR\n");
577 			break;
578 		case I40E_RESET_EMPR:
579 			printf("EMPR\n");
580 			break;
581 		default:
582 			printf("POR\n");
583 			break;
584 		}
585 		/* overload admin queue task to check reset progress */
586 		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
587 		do_task = TRUE;
588 	}
589 
590 	/*
591 	 * PE / PCI / ECC exceptions are all handled in the same way:
592 	 * mask out these three causes, then request a PF reset
593 	 *
594 	 * TODO: I think at least ECC error requires a GLOBR, not PFR
595 	 */
596 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 		device_printf(dev, "ECC Error detected!\n");
598 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 		device_printf(dev, "PCI Exception detected!\n");
600 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 	/* Checks against the conditions above */
603 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 		atomic_set_32(&pf->state,
606 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
607 		do_task = TRUE;
608 	}
609 
610 	// TODO: Linux driver never re-enables this interrupt once it has been detected
611 	// Then what is supposed to happen? A PF reset? Should it never happen?
612 	// TODO: Parse out this error into something human readable
613 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 			device_printf(dev, "HMC Error detected!\n");
617 			device_printf(dev, "INFO 0x%08x\n", reg);
618 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 			device_printf(dev, "DATA 0x%08x\n", reg);
620 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
621 		}
622 	}
623 
624 #ifdef PCI_IOV
625 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 		iflib_iov_intr_deferred(pf->vsi.ctx);
628 	}
629 #endif
630 
631 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 	ixl_enable_intr0(hw);
633 
634 	if (do_task)
635 		return (FILTER_SCHEDULE_THREAD);
636 	else
637 		return (FILTER_HANDLED);
638 }
639 
640 static u_int
641 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
642 {
643 	struct ixl_vsi *vsi = arg;
644 
645 	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
646 
647 	return (1);
648 }
649 
650 /*********************************************************************
651  * 	Filter Routines
652  *
653  *	Routines for multicast and VLAN filter management.
654  *
655  *********************************************************************/
656 void
657 ixl_add_multi(struct ixl_vsi *vsi)
658 {
659 	struct ifnet		*ifp = vsi->ifp;
660 	struct i40e_hw		*hw = vsi->hw;
661 	int			mcnt = 0, flags;
662 
663 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
664 
665 	/*
666 	** First, just get a count to decide whether
667 	** we should simply use multicast promiscuous mode.
668 	*/
669 	mcnt = if_llmaddr_count(ifp);
670 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
671 		/* delete existing MC filters */
672 		ixl_del_hw_filters(vsi, mcnt);
673 		i40e_aq_set_vsi_multicast_promiscuous(hw,
674 		    vsi->seid, TRUE, NULL);
675 		return;
676 	}
677 
678 	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
679 	if (mcnt > 0) {
680 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
681 		ixl_add_hw_filters(vsi, flags, mcnt);
682 	}
683 
684 	IOCTL_DEBUGOUT("ixl_add_multi: end");
685 }
686 
687 static u_int
688 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
689 {
690 	struct ixl_mac_filter *f = arg;
691 
692 	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
693 		return (1);
694 	else
695 		return (0);
696 }
697 
698 int
699 ixl_del_multi(struct ixl_vsi *vsi)
700 {
701 	struct ifnet		*ifp = vsi->ifp;
702 	struct ixl_mac_filter	*f;
703 	int			mcnt = 0;
704 
705 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
706 
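	/*
	 * Flag any multicast filter in the software list that no longer
	 * appears in the ifnet's multicast address list, then remove the
	 * flagged filters from the hardware in one batch.
	 */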
707 	SLIST_FOREACH(f, &vsi->ftl, next)
708 		if ((f->flags & IXL_FILTER_USED) &&
709 		    (f->flags & IXL_FILTER_MC) &&
710 		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
711 			f->flags |= IXL_FILTER_DEL;
712 			mcnt++;
713 		}
714 
715 	if (mcnt > 0)
716 		ixl_del_hw_filters(vsi, mcnt);
717 
718 	return (mcnt);
719 }
720 
721 void
722 ixl_link_up_msg(struct ixl_pf *pf)
723 {
724 	struct i40e_hw *hw = &pf->hw;
725 	struct ifnet *ifp = pf->vsi.ifp;
726 	char *req_fec_string, *neg_fec_string;
727 	u8 fec_abilities;
728 
729 	fec_abilities = hw->phy.link_info.req_fec_info;
730 	/* If both RS and KR are requested, only show RS */
731 	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
732 		req_fec_string = ixl_fec_string[0];
733 	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
734 		req_fec_string = ixl_fec_string[1];
735 	else
736 		req_fec_string = ixl_fec_string[2];
737 
738 	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
739 		neg_fec_string = ixl_fec_string[0];
740 	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
741 		neg_fec_string = ixl_fec_string[1];
742 	else
743 		neg_fec_string = ixl_fec_string[2];
744 
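	/*
	 * Example log line (illustrative):
	 * "ixl0: Link is up, 40 Gbps Full Duplex, Requested FEC: None,
	 *  Negotiated FEC: None, Autoneg: True, Flow Control: None"
	 */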
745 	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
746 	    ifp->if_xname,
747 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
748 	    req_fec_string, neg_fec_string,
749 	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
750 	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
751 	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
752 		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
753 		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
754 		ixl_fc_string[1] : ixl_fc_string[0]);
755 }
756 
757 /*
758  * Configure admin queue/misc interrupt cause registers in hardware.
759  */
760 void
761 ixl_configure_intr0_msix(struct ixl_pf *pf)
762 {
763 	struct i40e_hw *hw = &pf->hw;
764 	u32 reg;
765 
766 	/* First set up the adminq - vector 0 */
767 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
768 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
769 
770 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
771 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
772 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
773 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
774 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
775 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
776 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
777 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
778 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
779 
780 	/*
781 	 * 0x7FF is the end of the queue list.
782 	 * This means we won't use MSI-X vector 0 for a queue interrupt
783 	 * in MSI-X mode.
784 	 */
785 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
786 	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
787 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
788 
789 	wr32(hw, I40E_PFINT_DYN_CTL0,
790 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
791 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
792 
793 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
794 }
795 
796 /*
797  * Configure queue interrupt cause registers in hardware.
798  *
799  * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
800  */
801 void
802 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
803 {
804 	struct i40e_hw *hw = &pf->hw;
805 	struct ixl_vsi *vsi = &pf->vsi;
806 	u32		reg;
807 	u16		vector = 1;
808 
809 	// TODO: See if max is really necessary
810 	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
811 		/* Make sure interrupt is disabled */
812 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
813 		/* Set linked list head to point to corresponding RX queue
814 		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
815 		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
816 		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
817 		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
818 		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
819 		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
820 
821 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
822 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
823 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
824 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
825 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
826 		wr32(hw, I40E_QINT_RQCTL(i), reg);
827 
828 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
829 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
830 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
831 		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
832 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
833 		wr32(hw, I40E_QINT_TQCTL(i), reg);
834 	}
835 }
836 
837 /*
838  * Configure for single interrupt vector operation
839  */
840 void
841 ixl_configure_legacy(struct ixl_pf *pf)
842 {
843 	struct i40e_hw	*hw = &pf->hw;
844 	struct ixl_vsi	*vsi = &pf->vsi;
845 	u32 reg;
846 
847 // TODO: Fix
848 #if 0
849 	/* Configure ITR */
850 	vsi->tx_itr_setting = pf->tx_itr;
851 	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
852 	    vsi->tx_itr_setting);
853 	txr->itr = vsi->tx_itr_setting;
854 
855 	vsi->rx_itr_setting = pf->rx_itr;
856 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
857 	    vsi->rx_itr_setting);
858 	rxr->itr = vsi->rx_itr_setting;
859 	/* XXX: Assuming only 1 queue in single interrupt mode */
860 #endif
861 	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
862 
863 	/* Setup "other" causes */
864 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
865 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
866 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
867 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
868 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
869 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
870 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
871 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
872 	    ;
873 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
874 
875 	/* No ITR for non-queue interrupts */
876 	wr32(hw, I40E_PFINT_STAT_CTL0,
877 	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
878 
879 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
880 	wr32(hw, I40E_PFINT_LNKLST0, 0);
881 
882 	/* Associate the queue pair to the vector and enable the q int */
883 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
884 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
885 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
886 	wr32(hw, I40E_QINT_RQCTL(0), reg);
887 
888 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
889 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
890 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
891 	wr32(hw, I40E_QINT_TQCTL(0), reg);
892 }
893 
894 void
895 ixl_free_pci_resources(struct ixl_pf *pf)
896 {
897 	struct ixl_vsi		*vsi = &pf->vsi;
898 	device_t		dev = iflib_get_dev(vsi->ctx);
899 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
900 
901 	/* We may get here before stations are set up */
902 	if (rx_que == NULL)
903 		goto early;
904 
905 	/*
906 	**  Release all MSI-X VSI resources:
907 	*/
908 	iflib_irq_free(vsi->ctx, &vsi->irq);
909 
910 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
911 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
912 early:
913 	if (pf->pci_mem != NULL)
914 		bus_release_resource(dev, SYS_RES_MEMORY,
915 		    rman_get_rid(pf->pci_mem), pf->pci_mem);
916 }
917 
918 void
919 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
920 {
921 	/* Display supported media types */
922 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
923 		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
924 
925 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
926 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
927 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
928 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
929 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
930 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
931 
932 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
933 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
934 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
935 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
936 
937 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
938 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
939 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
940 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
941 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
942 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
943 
944 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
945 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
946 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
947 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
948 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
949 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
950 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
951 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
952 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
953 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
954 
955 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
956 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
957 
958 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
959 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
960 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
961 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
962 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
963 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
964 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
965 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
966 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
967 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
968 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
969 
970 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
971 		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
972 
973 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
974 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
975 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
976 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
977 
978 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
979 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
980 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
981 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
982 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
983 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
984 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
985 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
986 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
987 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
988 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
989 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
990 }
991 
992 /*********************************************************************
993  *
994  *  Setup networking device structure and register an interface.
995  *
996  **********************************************************************/
997 int
998 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
999 {
1000 	struct ixl_vsi *vsi = &pf->vsi;
1001 	if_ctx_t ctx = vsi->ctx;
1002 	struct i40e_hw *hw = &pf->hw;
1003 	struct ifnet *ifp = iflib_get_ifp(ctx);
1004 	struct i40e_aq_get_phy_abilities_resp abilities;
1005 	enum i40e_status_code aq_error = 0;
1006 
1007 	INIT_DBG_DEV(dev, "begin");
1008 
1009 	vsi->shared->isc_max_frame_size =
1010 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1011 	    + ETHER_VLAN_ENCAP_LEN;
1012 
1013 	aq_error = i40e_aq_get_phy_capabilities(hw,
1014 	    FALSE, TRUE, &abilities, NULL);
1015 	/* May need delay to detect fiber correctly */
1016 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1017 		/* TODO: Maybe just retry this in a task... */
1018 		i40e_msec_delay(200);
1019 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1020 		    TRUE, &abilities, NULL);
1021 	}
1022 	if (aq_error) {
1023 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1024 			device_printf(dev, "Unknown PHY type detected!\n");
1025 		else
1026 			device_printf(dev,
1027 			    "Error getting supported media types, err %d,"
1028 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1029 	} else {
1030 		pf->supported_speeds = abilities.link_speed;
1031 #if __FreeBSD_version >= 1100000
1032 		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1033 #else
1034 		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1035 #endif
1036 
1037 		ixl_add_ifmedia(vsi, hw->phy.phy_types);
1038 	}
1039 
1040 	/* Use autoselect media by default */
1041 	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1042 	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1043 
1044 	return (0);
1045 }
1046 
1047 /*
1048  * Input: bitmap of enum i40e_aq_link_speed
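 * Output: the highest set speed, as a baudrate in bits per second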
1049  */
1050 u64
1051 ixl_max_aq_speed_to_value(u8 link_speeds)
1052 {
1053 	if (link_speeds & I40E_LINK_SPEED_40GB)
1054 		return IF_Gbps(40);
1055 	if (link_speeds & I40E_LINK_SPEED_25GB)
1056 		return IF_Gbps(25);
1057 	if (link_speeds & I40E_LINK_SPEED_20GB)
1058 		return IF_Gbps(20);
1059 	if (link_speeds & I40E_LINK_SPEED_10GB)
1060 		return IF_Gbps(10);
1061 	if (link_speeds & I40E_LINK_SPEED_1GB)
1062 		return IF_Gbps(1);
1063 	if (link_speeds & I40E_LINK_SPEED_100MB)
1064 		return IF_Mbps(100);
1065 	else
1066 		/* Minimum supported link speed */
1067 		return IF_Mbps(100);
1068 }
1069 
1070 /*
1071 ** Run when the Admin Queue gets a link state change interrupt.
1072 */
1073 void
1074 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1075 {
1076 	struct i40e_hw *hw = &pf->hw;
1077 	device_t dev = iflib_get_dev(pf->vsi.ctx);
1078 	struct i40e_aqc_get_link_status *status =
1079 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1080 
1081 	/* Request link status from adapter */
1082 	hw->phy.get_link_info = TRUE;
1083 	i40e_get_link_status(hw, &pf->link_up);
1084 
1085 	/* Print out message if an unqualified module is found */
1086 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1087 	    (pf->advertised_speed) &&
1088 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1089 	    (!(status->link_info & I40E_AQ_LINK_UP)))
1090 		device_printf(dev, "Link failed because "
1091 		    "an unqualified module was detected!\n");
1092 
1093 	/* OS link info is updated elsewhere */
1094 }
1095 
1096 /*********************************************************************
1097  *
1098  *  Get Firmware Switch configuration
1099  *	- this will need to be more robust when more complex
1100  *	  switch configurations are enabled.
1101  *
1102  **********************************************************************/
1103 int
1104 ixl_switch_config(struct ixl_pf *pf)
1105 {
1106 	struct i40e_hw	*hw = &pf->hw;
1107 	struct ixl_vsi	*vsi = &pf->vsi;
1108 	device_t 	dev = iflib_get_dev(vsi->ctx);
1109 	struct i40e_aqc_get_switch_config_resp *sw_config;
1110 	u8	aq_buf[I40E_AQ_LARGE_BUF];
1111 	int	ret;
1112 	u16	next = 0;
1113 
1114 	memset(&aq_buf, 0, sizeof(aq_buf));
1115 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1116 	ret = i40e_aq_get_switch_config(hw, sw_config,
1117 	    sizeof(aq_buf), &next, NULL);
1118 	if (ret) {
1119 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
1120 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1121 		return (ret);
1122 	}
1123 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1124 		device_printf(dev,
1125 		    "Switch config: header reported: %d in structure, %d total\n",
1126 		    sw_config->header.num_reported, sw_config->header.num_total);
1127 		for (int i = 0; i < sw_config->header.num_reported; i++) {
1128 			device_printf(dev,
1129 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1130 			    sw_config->element[i].element_type,
1131 			    sw_config->element[i].seid,
1132 			    sw_config->element[i].uplink_seid,
1133 			    sw_config->element[i].downlink_seid);
1134 		}
1135 	}
1136 	/* Simplified due to a single VSI */
1137 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
1138 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
1139 	vsi->seid = sw_config->element[0].seid;
1140 	return (ret);
1141 }
1142 
1143 /*********************************************************************
1144  *
1145  *  Initialize the VSI:  this handles the VSI context (number of
1146  *  			 descriptors, buffer size, and so on), and also
1147  *			 initializes the rings.
1148  *
1149  **********************************************************************/
1150 int
1151 ixl_initialize_vsi(struct ixl_vsi *vsi)
1152 {
1153 	struct ixl_pf *pf = vsi->back;
1154 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
1155 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
1156 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1157 	device_t		dev = iflib_get_dev(vsi->ctx);
1158 	struct i40e_hw		*hw = vsi->hw;
1159 	struct i40e_vsi_context	ctxt;
1160 	int 			tc_queues;
1161 	int			err = 0;
1162 
1163 	memset(&ctxt, 0, sizeof(ctxt));
1164 	ctxt.seid = vsi->seid;
1165 	if (pf->veb_seid != 0)
1166 		ctxt.uplink_seid = pf->veb_seid;
1167 	ctxt.pf_num = hw->pf_id;
1168 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1169 	if (err) {
1170 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1171 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1172 		return (err);
1173 	}
1174 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1175 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1176 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1177 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1178 	    ctxt.uplink_seid, ctxt.vsi_number,
1179 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
1180 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1181 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1182 	/*
1183 	** Set the queue and traffic class bits
1184 	**  - when multiple traffic classes are supported
1185 	**    this will need to be more robust.
1186 	*/
1187 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1188 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1189 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
1190 	ctxt.info.queue_mapping[0] = 0;
1191 	/*
1192 	 * This VSI will only use traffic class 0; start traffic class 0's
1193 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1194 	 * the driver may not use all of them).
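	 * For example, if pf->qtag.num_allocated is 8, then fls(8) - 1 == 3,
	 * so TC0 is sized for 2^3 == 8 queues.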
1195 	 */
1196 	tc_queues = fls(pf->qtag.num_allocated) - 1;
1197 	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1198 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1199 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1200 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1201 
1202 	/* Set VLAN receive stripping mode */
1203 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1204 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1205 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1206 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1207 	else
1208 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1209 
1210 #ifdef IXL_IW
1211 	/* Set TCP Enable for iWARP capable VSI */
1212 	if (ixl_enable_iwarp && pf->iw_enabled) {
1213 		ctxt.info.valid_sections |=
1214 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1215 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1216 	}
1217 #endif
1218 	/* Save VSI number and info for use later */
1219 	vsi->vsi_num = ctxt.vsi_number;
1220 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1221 
1222 	/* Reset VSI statistics */
1223 	ixl_vsi_reset_stats(vsi);
1224 	vsi->hw_filters_add = 0;
1225 	vsi->hw_filters_del = 0;
1226 
1227 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1228 
1229 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1230 	if (err) {
1231 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1232 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1233 		return (err);
1234 	}
1235 
1236 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1237 		struct tx_ring		*txr = &tx_que->txr;
1238 		struct i40e_hmc_obj_txq tctx;
1239 		u32			txctl;
1240 
1241 		/* Setup the HMC TX Context  */
1242 		bzero(&tctx, sizeof(tctx));
1243 		tctx.new_context = 1;
1244 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1245 		tctx.qlen = scctx->isc_ntxd[0];
1246 		tctx.fc_ena = 0;	/* Disable FCoE */
1247 		/*
1248 		 * This value needs to be pulled from the VSI that this queue
1249 		 * is assigned to. Index into array is traffic class.
1250 		 */
1251 		tctx.rdylist = vsi->info.qs_handle[0];
1252 		/*
1253 		 * Set these to enable Head Writeback
1254 		 * - Address is last entry in TX ring (reserved for HWB index)
1255 		 * Leave these as 0 for Descriptor Writeback
1256 		 */
1257 		if (vsi->enable_head_writeback) {
1258 			tctx.head_wb_ena = 1;
1259 			tctx.head_wb_addr = txr->tx_paddr +
1260 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1261 		} else {
1262 			tctx.head_wb_ena = 0;
1263 			tctx.head_wb_addr = 0;
1264 		}
1265 		tctx.rdylist_act = 0;
1266 		err = i40e_clear_lan_tx_queue_context(hw, i);
1267 		if (err) {
1268 			device_printf(dev, "Unable to clear TX context\n");
1269 			break;
1270 		}
1271 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1272 		if (err) {
1273 			device_printf(dev, "Unable to set TX context\n");
1274 			break;
1275 		}
1276 		/* Associate the ring with this PF */
1277 		txctl = I40E_QTX_CTL_PF_QUEUE;
1278 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1279 		    I40E_QTX_CTL_PF_INDX_MASK);
1280 		wr32(hw, I40E_QTX_CTL(i), txctl);
1281 		ixl_flush(hw);
1282 
1283 		/* Do ring (re)init */
1284 		ixl_init_tx_ring(vsi, tx_que);
1285 	}
1286 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1287 		struct rx_ring 		*rxr = &rx_que->rxr;
1288 		struct i40e_hmc_obj_rxq rctx;
1289 
1290 		/* Next setup the HMC RX Context  */
1291 		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
1292 
1293 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1294 
1295 		/* Set up an RX context for the HMC */
1296 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1297 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1298 		/* ignore header split for now */
1299 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1300 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1301 		    scctx->isc_max_frame_size : max_rxmax;
1302 		rctx.dtype = 0;
1303 		rctx.dsize = 1;		/* do 32byte descriptors */
1304 		rctx.hsplit_0 = 0;	/* no header split */
1305 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1306 		rctx.qlen = scctx->isc_nrxd[0];
1307 		rctx.tphrdesc_ena = 1;
1308 		rctx.tphwdesc_ena = 1;
1309 		rctx.tphdata_ena = 0;	/* Header Split related */
1310 		rctx.tphhead_ena = 0;	/* Header Split related */
1311 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
1312 		rctx.crcstrip = 1;
1313 		rctx.l2tsel = 1;
1314 		rctx.showiv = 1;	/* Strip inner VLAN header */
1315 		rctx.fc_ena = 0;	/* Disable FCoE */
1316 		rctx.prefena = 1;	/* Prefetch descriptors */
1317 
1318 		err = i40e_clear_lan_rx_queue_context(hw, i);
1319 		if (err) {
1320 			device_printf(dev,
1321 			    "Unable to clear RX context %d\n", i);
1322 			break;
1323 		}
1324 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1325 		if (err) {
1326 			device_printf(dev, "Unable to set RX context %d\n", i);
1327 			break;
1328 		}
1329 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
1330 	}
1331 	return (err);
1332 }
1333 
1334 void
1335 ixl_free_mac_filters(struct ixl_vsi *vsi)
1336 {
1337 	struct ixl_mac_filter *f;
1338 
1339 	while (!SLIST_EMPTY(&vsi->ftl)) {
1340 		f = SLIST_FIRST(&vsi->ftl);
1341 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
1342 		free(f, M_DEVBUF);
1343 	}
1344 }
1345 
1346 /*
1347 ** Provide an update to the queue RX
1348 ** interrupt moderation value.
1349 */
1350 void
1351 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1352 {
1353 	struct ixl_vsi	*vsi = que->vsi;
1354 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1355 	struct i40e_hw	*hw = vsi->hw;
1356 	struct rx_ring	*rxr = &que->rxr;
1357 	u16		rx_itr;
1358 	u16		rx_latency = 0;
1359 	int		rx_bytes;
1360 
1361 	/* Idle, do nothing */
1362 	if (rxr->bytes == 0)
1363 		return;
1364 
1365 	if (pf->dynamic_rx_itr) {
1366 		rx_bytes = rxr->bytes/rxr->itr;
1367 		rx_itr = rxr->itr;
1368 
1369 		/* Adjust latency range */
1370 		switch (rxr->latency) {
1371 		case IXL_LOW_LATENCY:
1372 			if (rx_bytes > 10) {
1373 				rx_latency = IXL_AVE_LATENCY;
1374 				rx_itr = IXL_ITR_20K;
1375 			}
1376 			break;
1377 		case IXL_AVE_LATENCY:
1378 			if (rx_bytes > 20) {
1379 				rx_latency = IXL_BULK_LATENCY;
1380 				rx_itr = IXL_ITR_8K;
1381 			} else if (rx_bytes <= 10) {
1382 				rx_latency = IXL_LOW_LATENCY;
1383 				rx_itr = IXL_ITR_100K;
1384 			}
1385 			break;
1386 		case IXL_BULK_LATENCY:
1387 			if (rx_bytes <= 20) {
1388 				rx_latency = IXL_AVE_LATENCY;
1389 				rx_itr = IXL_ITR_20K;
1390 			}
1391 			break;
1392 		}
1393 
1394 		rxr->latency = rx_latency;
1395 
1396 		if (rx_itr != rxr->itr) {
1397 			/* do an exponential smoothing */
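			/*
			 * The expression below is a weighted harmonic blend:
			 * 1/itr = 0.9 * (1/previous) + 0.1 * (1/target),
			 * so the programmed ITR converges toward the target
			 * gradually instead of jumping to it.
			 */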
1398 			rx_itr = (10 * rx_itr * rxr->itr) /
1399 			    ((9 * rx_itr) + rxr->itr);
1400 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
1401 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1402 			    rxr->me), rxr->itr);
1403 		}
1404 	} else { /* We may have toggled to non-dynamic */
1405 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1406 			vsi->rx_itr_setting = pf->rx_itr;
1407 		/* Update the hardware if needed */
1408 		if (rxr->itr != vsi->rx_itr_setting) {
1409 			rxr->itr = vsi->rx_itr_setting;
1410 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1411 			    rxr->me), rxr->itr);
1412 		}
1413 	}
1414 	rxr->bytes = 0;
1415 	rxr->packets = 0;
1416 }
1417 
1418 
1419 /*
1420 ** Provide an update to the queue TX
1421 ** interrupt moderation value.
1422 */
1423 void
1424 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1425 {
1426 	struct ixl_vsi	*vsi = que->vsi;
1427 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1428 	struct i40e_hw	*hw = vsi->hw;
1429 	struct tx_ring	*txr = &que->txr;
1430 	u16		tx_itr;
1431 	u16		tx_latency = 0;
1432 	int		tx_bytes;
1433 
1434 
1435 	/* Idle, do nothing */
1436 	if (txr->bytes == 0)
1437 		return;
1438 
1439 	if (pf->dynamic_tx_itr) {
1440 		tx_bytes = txr->bytes/txr->itr;
1441 		tx_itr = txr->itr;
1442 
1443 		switch (txr->latency) {
1444 		case IXL_LOW_LATENCY:
1445 			if (tx_bytes > 10) {
1446 				tx_latency = IXL_AVE_LATENCY;
1447 				tx_itr = IXL_ITR_20K;
1448 			}
1449 			break;
1450 		case IXL_AVE_LATENCY:
1451 			if (tx_bytes > 20) {
1452 				tx_latency = IXL_BULK_LATENCY;
1453 				tx_itr = IXL_ITR_8K;
1454 			} else if (tx_bytes <= 10) {
1455 				tx_latency = IXL_LOW_LATENCY;
1456 				tx_itr = IXL_ITR_100K;
1457 			}
1458 			break;
1459 		case IXL_BULK_LATENCY:
1460 			if (tx_bytes <= 20) {
1461 				tx_latency = IXL_AVE_LATENCY;
1462 				tx_itr = IXL_ITR_20K;
1463 			}
1464 			break;
1465 		}
1466 
1467 		txr->latency = tx_latency;
1468 
1469 		if (tx_itr != txr->itr) {
1470 			/* do an exponential smoothing */
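			/* (Same 9:1 reciprocal-space blend as in the RX
			 * path above.) */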
1471 			tx_itr = (10 * tx_itr * txr->itr) /
1472 			    ((9 * tx_itr) + txr->itr);
1473 			txr->itr = min(tx_itr, IXL_MAX_ITR);
1474 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1475 			    txr->me), txr->itr);
1476 		}
1477 
1478 	} else { /* We may have toggled to non-dynamic */
1479 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1480 			vsi->tx_itr_setting = pf->tx_itr;
1481 		/* Update the hardware if needed */
1482 		if (txr->itr != vsi->tx_itr_setting) {
1483 			txr->itr = vsi->tx_itr_setting;
1484 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1485 			    txr->me), txr->itr);
1486 		}
1487 	}
1488 	txr->bytes = 0;
1489 	txr->packets = 0;
1490 	return;
1491 }
1492 
1493 #ifdef IXL_DEBUG
1494 /**
1495  * ixl_sysctl_qtx_tail_handler
1496  * Retrieves I40E_QTX_TAIL value from hardware
1497  * for a sysctl.
1498  */
1499 int
1500 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1501 {
1502 	struct ixl_tx_queue *tx_que;
1503 	int error;
1504 	u32 val;
1505 
1506 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1507 	if (!tx_que) return 0;
1508 
1509 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1510 	error = sysctl_handle_int(oidp, &val, 0, req);
1511 	if (error || !req->newptr)
1512 		return error;
1513 	return (0);
1514 }
1515 
1516 /**
1517  * ixl_sysctl_qrx_tail_handler
1518  * Retrieves I40E_QRX_TAIL value from hardware
1519  * for a sysctl.
1520  */
1521 int
1522 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1523 {
1524 	struct ixl_rx_queue *rx_que;
1525 	int error;
1526 	u32 val;
1527 
1528 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1529 	if (!rx_que) return 0;
1530 
1531 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1532 	error = sysctl_handle_int(oidp, &val, 0, req);
1533 	if (error || !req->newptr)
1534 		return error;
1535 	return (0);
1536 }
1537 #endif
1538 
1539 /*
1540  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1541  * Writes to the ITR registers immediately.
1542  */
1543 static int
1544 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1545 {
1546 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1547 	device_t dev = pf->dev;
1548 	int error = 0;
1549 	int requested_tx_itr;
1550 
1551 	requested_tx_itr = pf->tx_itr;
1552 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1553 	if ((error) || (req->newptr == NULL))
1554 		return (error);
1555 	if (pf->dynamic_tx_itr) {
1556 		device_printf(dev,
1557 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
1558 		return (EINVAL);
1559 	}
1560 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1561 		device_printf(dev,
1562 		    "Invalid TX itr value; value must be between 0 and %d\n",
1563 		        IXL_MAX_ITR);
1564 		return (EINVAL);
1565 	}
1566 
1567 	pf->tx_itr = requested_tx_itr;
1568 	ixl_configure_tx_itr(pf);
1569 
1570 	return (error);
1571 }
1572 
1573 /*
1574  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1575  * Writes to the ITR registers immediately.
1576  */
1577 static int
1578 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1579 {
1580 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1581 	device_t dev = pf->dev;
1582 	int error = 0;
1583 	int requested_rx_itr;
1584 
1585 	requested_rx_itr = pf->rx_itr;
1586 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1587 	if ((error) || (req->newptr == NULL))
1588 		return (error);
1589 	if (pf->dynamic_rx_itr) {
1590 		device_printf(dev,
1591 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1592 		return (EINVAL);
1593 	}
1594 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1595 		device_printf(dev,
1596 		    "Invalid RX itr value; value must be between 0 and %d\n",
1597 		        IXL_MAX_ITR);
1598 		return (EINVAL);
1599 	}
1600 
1601 	pf->rx_itr = requested_rx_itr;
1602 	ixl_configure_rx_itr(pf);
1603 
1604 	return (error);
1605 }
1606 
1607 void
1608 ixl_add_hw_stats(struct ixl_pf *pf)
1609 {
1610 	struct ixl_vsi *vsi = &pf->vsi;
1611 	device_t dev = iflib_get_dev(vsi->ctx);
1612 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1613 
1614 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1615 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1616 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1617 
1618 	/* Driver statistics */
1619 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1620 			CTLFLAG_RD, &pf->admin_irq,
1621 			"Admin Queue IRQs received");
1622 
1623 	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1624 
1625 	ixl_add_queues_sysctls(dev, vsi);
1626 
1627 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1628 }
1629 
1630 void
1631 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1632 	struct sysctl_oid_list *child,
1633 	struct i40e_hw_port_stats *stats)
1634 {
1635 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1636 				    CTLFLAG_RD, NULL, "Mac Statistics");
1637 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1638 
1639 	struct i40e_eth_stats *eth_stats = &stats->eth;
1640 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1641 
1642 	struct ixl_sysctl_info ctls[] =
1643 	{
1644 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1645 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1646 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1647 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1648 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1649 		/* Packet Reception Stats */
1650 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1651 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1652 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1653 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1654 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1655 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1656 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1657 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1658 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1659 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1660 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1661 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1662 		/* Packet Transmission Stats */
1663 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1664 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1665 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1666 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1667 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1668 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1669 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1670 		/* Flow control */
1671 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1672 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1673 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1674 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1675 		/* End */
1676 		{0,0,0}
1677 	};
1678 
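	/*
	 * Walk the table and register a read-only sysctl for each entry,
	 * stopping at the all-zero sentinel that terminates the array.
	 */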
1679 	struct ixl_sysctl_info *entry = ctls;
1680 	while (entry->stat != 0)
1681 	{
1682 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1683 				CTLFLAG_RD, entry->stat,
1684 				entry->description);
1685 		entry++;
1686 	}
1687 }
1688 
1689 void
1690 ixl_set_rss_key(struct ixl_pf *pf)
1691 {
1692 	struct i40e_hw *hw = &pf->hw;
1693 	struct ixl_vsi *vsi = &pf->vsi;
1694 	device_t	dev = pf->dev;
1695 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1696 	enum i40e_status_code status;
1697 
1698 #ifdef RSS
1699         /* Fetch the configured RSS key */
1700         rss_getkey((uint8_t *) &rss_seed);
1701 #else
1702 	ixl_get_default_rss_key(rss_seed);
1703 #endif
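	/*
	 * The seed is IXL_RSS_KEY_SIZE_REG 32-bit words (52 bytes here, per
	 * the bcopy below): the X722 takes the key through an admin queue
	 * command, while older devices have the PFQF_HKEY registers written
	 * directly.
	 */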
1704 	/* Fill out hash function seed */
1705 	if (hw->mac.type == I40E_MAC_X722) {
1706 		struct i40e_aqc_get_set_rss_key_data key_data;
1707 		bcopy(rss_seed, &key_data, 52);
1708 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1709 		if (status)
1710 			device_printf(dev,
1711 			    "i40e_aq_set_rss_key status %s, error %s\n",
1712 			    i40e_stat_str(hw, status),
1713 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1714 	} else {
1715 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1716 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1717 	}
1718 }
1719 
1720 /*
1721  * Configure enabled PCTYPES for RSS.
1722  */
1723 void
1724 ixl_set_rss_pctypes(struct ixl_pf *pf)
1725 {
1726 	struct i40e_hw *hw = &pf->hw;
1727 	u64		set_hena = 0, hena;
1728 
1729 #ifdef RSS
1730 	u32		rss_hash_config;
1731 
1732 	rss_hash_config = rss_gethashconfig();
1733 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1734                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1735 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1736                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1737 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1738                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1739 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1740                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1741 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1742 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1743 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1744                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1745         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1746                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1747 #else
1748 	if (hw->mac.type == I40E_MAC_X722)
1749 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1750 	else
1751 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1752 #endif
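	/*
	 * HENA is a 64-bit enable mask split across two 32-bit registers;
	 * read-modify-write it so already-enabled PCTYPEs are preserved.
	 */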
1753 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1754 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1755 	hena |= set_hena;
1756 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1757 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1758 
1759 }
1760 
1761 void
1762 ixl_set_rss_hlut(struct ixl_pf *pf)
1763 {
1764 	struct i40e_hw	*hw = &pf->hw;
1765 	struct ixl_vsi *vsi = &pf->vsi;
1766 	device_t	dev = iflib_get_dev(vsi->ctx);
1767 	int		i, que_id;
1768 	int		lut_entry_width;
1769 	u32		lut = 0;
1770 	enum i40e_status_code status;
1771 
1772 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1773 
1774 	/* Populate the LUT with the RX queues in round-robin fashion */
1775 	u8 hlut_buf[512];
1776 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1777 #ifdef RSS
1778 		/*
1779 		 * Fetch the RSS bucket id for the given indirection entry.
1780 		 * Cap it at the number of configured buckets (which is
1781 		 * num_queues.)
1782 		 */
1783 		que_id = rss_get_indirection_to_bucket(i);
1784 		que_id = que_id % vsi->num_rx_queues;
1785 #else
1786 		que_id = i % vsi->num_rx_queues;
1787 #endif
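		/*
		 * Mask the queue id down to the LUT entry width; e.g. with a
		 * 7-bit entry width this keeps (que_id & 0x7F).
		 */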
1788 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1789 		hlut_buf[i] = lut;
1790 	}
1791 
1792 	if (hw->mac.type == I40E_MAC_X722) {
1793 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1794 		if (status)
1795 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1796 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1797 	} else {
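		/*
		 * Older devices pack four 8-bit LUT entries into each 32-bit
		 * HLUT register, hence rss_table_size >> 2 register writes.
		 */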
1798 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1799 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1800 		ixl_flush(hw);
1801 	}
1802 }
1803 
1804 /*
1805 ** Setup the PF's RSS parameters.
1806 */
1807 void
1808 ixl_config_rss(struct ixl_pf *pf)
1809 {
1810 	ixl_set_rss_key(pf);
1811 	ixl_set_rss_pctypes(pf);
1812 	ixl_set_rss_hlut(pf);
1813 }
1814 
1815 /*
1816 ** This routine updates VLAN filters; called by init,
1817 ** it scans the filter table and then updates the HW
1818 ** after a soft reset.
1819 */
1820 void
1821 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1822 {
1823 	struct ixl_mac_filter	*f;
1824 	int			cnt = 0, flags;
1825 
1826 	if (vsi->num_vlans == 0)
1827 		return;
1828 	/*
1829 	** Scan the filter list for vlan entries,
1830 	** mark them for addition and then call
1831 	** for the AQ update.
1832 	*/
1833 	SLIST_FOREACH(f, &vsi->ftl, next) {
1834 		if (f->flags & IXL_FILTER_VLAN) {
1835 			f->flags |=
1836 			    (IXL_FILTER_ADD |
1837 			    IXL_FILTER_USED);
1838 			cnt++;
1839 		}
1840 	}
1841 	if (cnt == 0) {
1842 		printf("setup vlan: no filters found!\n");
1843 		return;
1844 	}
1845 	flags = IXL_FILTER_VLAN;
1846 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1847 	ixl_add_hw_filters(vsi, flags, cnt);
1848 }
1849 
1850 /*
1851  * In some firmware versions a default MAC/VLAN filter is
1852  * configured, which interferes with filters managed by the driver.
1853  * Make sure it's removed.
1854  */
1855 void
1856 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1857 {
1858 	struct i40e_aqc_remove_macvlan_element_data e;
1859 
1860 	bzero(&e, sizeof(e));
1861 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1862 	e.vlan_tag = 0;
1863 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1864 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1865 
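	/*
	 * Also remove the variant of the same filter that matches the MAC
	 * address regardless of VLAN tag (the IGNORE_VLAN flavor).
	 */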
1866 	bzero(&e, sizeof(e));
1867 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1868 	e.vlan_tag = 0;
1869 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1870 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1871 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1872 }
1873 
1874 /*
1875 ** Initialize filter list and add filters that the hardware
1876 ** needs to know about.
1877 **
1878 ** Requires VSI's filter list & seid to be set before calling.
1879 */
1880 void
1881 ixl_init_filters(struct ixl_vsi *vsi)
1882 {
1883 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1884 
1885 	/* Initialize mac filter list for VSI */
1886 	SLIST_INIT(&vsi->ftl);
1887 
1888 	/* Receive broadcast Ethernet frames */
1889 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1890 
1891 	ixl_del_default_hw_filters(vsi);
1892 
1893 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1894 	/*
1895 	 * Prevent Tx flow control frames from being sent out by
1896 	 * non-firmware transmitters.
1897 	 * This affects every VSI in the PF.
1898 	 */
1899 	if (pf->enable_tx_fc_filter)
1900 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1901 }
1902 
1903 /*
1904 ** This routine adds multicast filters
1905 */
1906 void
1907 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1908 {
1909 	struct ixl_mac_filter *f;
1910 
1911 	/* Does one already exist? */
1912 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1913 	if (f != NULL)
1914 		return;
1915 
1916 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1917 	if (f != NULL)
1918 		f->flags |= IXL_FILTER_MC;
1919 	else
1920 		printf("WARNING: no filter available!!\n");
1921 }
1922 
1923 void
1924 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1925 {
1926 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1927 }
1928 
1929 /*
1930  * This routine adds a MAC/VLAN filter to the software filter
1931  * list, then adds that new filter to the HW if it doesn't already
1932  * exist in the SW filter list.
1933  */
1934 void
1935 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1936 {
1937 	struct ixl_mac_filter	*f, *tmp;
1938 	struct ixl_pf		*pf;
1939 	device_t		dev;
1940 
1941 	DEBUGOUT("ixl_add_filter: begin");
1942 
1943 	pf = vsi->back;
1944 	dev = pf->dev;
1945 
1946 	/* Does one already exist? */
1947 	f = ixl_find_filter(vsi, macaddr, vlan);
1948 	if (f != NULL)
1949 		return;
1950 	/*
1951 	** If this is the first VLAN being registered, we
1952 	** need to remove the ANY filter that indicates we are
1953 	** not in a VLAN, and replace it with a VLAN-0 filter.
1954 	*/
1955 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1956 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1957 		if (tmp != NULL) {
1958 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 			ixl_add_filter(vsi, macaddr, 0);
1960 		}
1961 	}
1962 
1963 	f = ixl_new_filter(vsi, macaddr, vlan);
1964 	if (f == NULL) {
1965 		device_printf(dev, "WARNING: no filter available!!\n");
1966 		return;
1967 	}
1968 	if (f->vlan != IXL_VLAN_ANY)
1969 		f->flags |= IXL_FILTER_VLAN;
1970 	else
1971 		vsi->num_macs++;
1972 
1973 	f->flags |= IXL_FILTER_USED;
1974 	ixl_add_hw_filters(vsi, f->flags, 1);
1975 }
1976 
1977 void
1978 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1979 {
1980 	struct ixl_mac_filter *f;
1981 
1982 	f = ixl_find_filter(vsi, macaddr, vlan);
1983 	if (f == NULL)
1984 		return;
1985 
1986 	f->flags |= IXL_FILTER_DEL;
1987 	ixl_del_hw_filters(vsi, 1);
1988 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1989 		vsi->num_macs--;
1990 
1991 	/* Check if this is the last vlan removal */
1992 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1993 		/* Switch back to a non-vlan filter */
1994 		ixl_del_filter(vsi, macaddr, 0);
1995 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1996 	}
1997 	return;
1998 }
1999 
2000 /*
2001 ** Find the filter with both matching mac addr and vlan id
2002 */
2003 struct ixl_mac_filter *
2004 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2005 {
2006 	struct ixl_mac_filter	*f;
2007 
2008 	SLIST_FOREACH(f, &vsi->ftl, next) {
2009 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2010 		    && (f->vlan == vlan)) {
2011 			return (f);
2012 		}
2013 	}
2014 
2015 	return (NULL);
2016 }
2017 
2018 /*
2019 ** This routine takes additions to the vsi filter
2020 ** table and creates an Admin Queue call to create
2021 ** the filters in the hardware.
2022 */
2023 void
2024 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2025 {
2026 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2027 	struct ixl_mac_filter	*f;
2028 	struct ixl_pf		*pf;
2029 	struct i40e_hw		*hw;
2030 	device_t		dev;
2031 	enum i40e_status_code	status;
2032 	int			j = 0;
2033 
2034 	pf = vsi->back;
2035 	dev = vsi->dev;
2036 	hw = &pf->hw;
2037 
2038 	if (cnt < 1) {
2039 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
2040 		return;
2041 	}
2042 
2043 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2044 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2045 	if (a == NULL) {
2046 		device_printf(dev, "add_hw_filters failed to get memory\n");
2047 		return;
2048 	}
2049 
2050 	/*
2051 	** Scan the filter list; each time we find a match
2052 	** we add it to the admin queue array and clear
2053 	** the add bit.
2054 	*/
2055 	SLIST_FOREACH(f, &vsi->ftl, next) {
2056 		if ((f->flags & flags) == flags) {
2057 			b = &a[j]; // a pox on fvl long names :)
2058 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2059 			if (f->vlan == IXL_VLAN_ANY) {
2060 				b->vlan_tag = 0;
2061 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2062 			} else {
2063 				b->vlan_tag = f->vlan;
2064 				b->flags = 0;
2065 			}
2066 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2067 			f->flags &= ~IXL_FILTER_ADD;
2068 			j++;
2069 
2070 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2071 			    MAC_FORMAT_ARGS(f->macaddr));
2072 		}
2073 		if (j == cnt)
2074 			break;
2075 	}
2076 	if (j > 0) {
2077 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2078 		if (status)
2079 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2080 			    "error %s\n", i40e_stat_str(hw, status),
2081 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2082 		else
2083 			vsi->hw_filters_add += j;
2084 	}
2085 	free(a, M_DEVBUF);
2086 	return;
2087 }
2088 
2089 /*
2090 ** This routine takes removals from the VSI filter
2091 ** table and creates an Admin Queue call to delete
2092 ** the filters in the hardware.
2093 */
2094 void
2095 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2096 {
2097 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2098 	struct ixl_pf		*pf;
2099 	struct i40e_hw		*hw;
2100 	device_t		dev;
2101 	struct ixl_mac_filter	*f, *f_temp;
2102 	enum i40e_status_code	status;
2103 	int			j = 0;
2104 
2105 	pf = vsi->back;
2106 	hw = &pf->hw;
2107 	dev = vsi->dev;
2108 
2109 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2110 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2111 	if (d == NULL) {
2112 		device_printf(dev, "%s: failed to get memory\n", __func__);
2113 		return;
2114 	}
2115 
2116 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2117 		if (f->flags & IXL_FILTER_DEL) {
2118 			e = &d[j]; // a pox on fvl long names :)
2119 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2120 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2121 			if (f->vlan == IXL_VLAN_ANY) {
2122 				e->vlan_tag = 0;
2123 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2124 			} else {
2125 				e->vlan_tag = f->vlan;
2126 			}
2127 
2128 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2129 			    MAC_FORMAT_ARGS(f->macaddr));
2130 
2131 			/* delete entry from vsi list */
2132 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2133 			free(f, M_DEVBUF);
2134 			j++;
2135 		}
2136 		if (j == cnt)
2137 			break;
2138 	}
2139 	if (j > 0) {
2140 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2141 		if (status) {
2142 			int sc = 0;
2143 			for (int i = 0; i < j; i++)
2144 				sc += (!d[i].error_code);
2145 			vsi->hw_filters_del += sc;
2146 			device_printf(dev,
2147 			    "Failed to remove %d/%d filters, error %s\n",
2148 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2149 		} else
2150 			vsi->hw_filters_del += j;
2151 	}
2152 	free(d, M_DEVBUF);
2153 	return;
2154 }
2155 
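/*
 * Request that the hardware enable a TX ring: set QENA_REQ, then poll
 * QENA_STAT (up to 10 tries, 10us apart) until the enable is acknowledged.
 */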
2156 int
2157 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2158 {
2159 	struct i40e_hw	*hw = &pf->hw;
2160 	int		error = 0;
2161 	u32		reg;
2162 	u16		pf_qidx;
2163 
2164 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2165 
2166 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2167 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2168 	    pf_qidx, vsi_qidx);
2169 
2170 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2171 
2172 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2173 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2174 	    I40E_QTX_ENA_QENA_STAT_MASK;
2175 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2176 	/* Verify the enable took */
2177 	for (int j = 0; j < 10; j++) {
2178 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2179 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2180 			break;
2181 		i40e_usec_delay(10);
2182 	}
2183 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2184 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2185 		    pf_qidx);
2186 		error = ETIMEDOUT;
2187 	}
2188 
2189 	return (error);
2190 }
2191 
2192 int
2193 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2194 {
2195 	struct i40e_hw	*hw = &pf->hw;
2196 	int		error = 0;
2197 	u32		reg;
2198 	u16		pf_qidx;
2199 
2200 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2201 
2202 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2203 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2204 	    pf_qidx, vsi_qidx);
2205 
2206 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2207 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2208 	    I40E_QRX_ENA_QENA_STAT_MASK;
2209 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2210 	/* Verify the enable took */
2211 	for (int j = 0; j < 10; j++) {
2212 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2213 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2214 			break;
2215 		i40e_usec_delay(10);
2216 	}
2217 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2218 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2219 		    pf_qidx);
2220 		error = ETIMEDOUT;
2221 	}
2222 
2223 	return (error);
2224 }
2225 
2226 int
2227 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2228 {
2229 	int error = 0;
2230 
2231 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2232 	/* Called function already prints error message */
2233 	if (error)
2234 		return (error);
2235 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2236 	return (error);
2237 }
2238 
2239 /* For PF VSI only */
2240 int
2241 ixl_enable_rings(struct ixl_vsi *vsi)
2242 {
2243 	struct ixl_pf	*pf = vsi->back;
2244 	int		error = 0;
2245 
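	/*
	 * Note: error is overwritten on each iteration, so only the status
	 * of the last ring enabled is returned to the caller.
	 */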
2246 	for (int i = 0; i < vsi->num_tx_queues; i++)
2247 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2248 
2249 	for (int i = 0; i < vsi->num_rx_queues; i++)
2250 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2251 
2252 	return (error);
2253 }
2254 
2255 /*
2256  * Returns an error on the first ring that is detected hung.
2257  */
2258 int
2259 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2260 {
2261 	struct i40e_hw	*hw = &pf->hw;
2262 	int		error = 0;
2263 	u32		reg;
2264 	u16		pf_qidx;
2265 
2266 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2267 
2268 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2269 	i40e_usec_delay(500);
2270 
2271 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2272 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2273 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2274 	/* Verify the disable took */
2275 	for (int j = 0; j < 10; j++) {
2276 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2277 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2278 			break;
2279 		i40e_msec_delay(10);
2280 	}
2281 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2282 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2283 		    pf_qidx);
2284 		error = ETIMEDOUT;
2285 	}
2286 
2287 	return (error);
2288 }
2289 
2290 /*
2291  * Returns an error on the first ring that is detected hung.
2292  */
2293 int
2294 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2295 {
2296 	struct i40e_hw	*hw = &pf->hw;
2297 	int		error = 0;
2298 	u32		reg;
2299 	u16		pf_qidx;
2300 
2301 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2302 
2303 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2304 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2305 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2306 	/* Verify the disable took */
2307 	for (int j = 0; j < 10; j++) {
2308 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2309 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2310 			break;
2311 		i40e_msec_delay(10);
2312 	}
2313 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2314 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2315 		    pf_qidx);
2316 		error = ETIMEDOUT;
2317 	}
2318 
2319 	return (error);
2320 }
2321 
2322 int
2323 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2324 {
2325 	int error = 0;
2326 
2327 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2328 	/* Called function already prints error message */
2329 	if (error)
2330 		return (error);
2331 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2332 	return (error);
2333 }
2334 
2335 int
2336 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2337 {
2338 	int error = 0;
2339 
2340 	for (int i = 0; i < vsi->num_tx_queues; i++)
2341 		error = ixl_disable_tx_ring(pf, qtag, i);
2342 
2343 	for (int i = 0; i < vsi->num_rx_queues; i++)
2344 		error = ixl_disable_rx_ring(pf, qtag, i);
2345 
2346 	return (error);
2347 }
2348 
2349 static void
2350 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2351 {
2352 	struct i40e_hw *hw = &pf->hw;
2353 	device_t dev = pf->dev;
2354 	struct ixl_vf *vf;
2355 	bool mdd_detected = false;
2356 	bool pf_mdd_detected = false;
2357 	bool vf_mdd_detected = false;
2358 	u16 vf_num, queue;
2359 	u8 pf_num, event;
2360 	u8 pf_mdet_num, vp_mdet_num;
2361 	u32 reg;
2362 
2363 	/* find what triggered the MDD event */
2364 	reg = rd32(hw, I40E_GL_MDET_TX);
2365 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2366 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2367 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2368 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2369 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2370 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2371 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2372 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2373 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2374 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2375 		mdd_detected = true;
2376 	}
2377 
2378 	if (!mdd_detected)
2379 		return;
2380 
2381 	reg = rd32(hw, I40E_PF_MDET_TX);
2382 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2383 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2384 		pf_mdet_num = hw->pf_id;
2385 		pf_mdd_detected = true;
2386 	}
2387 
2388 	/* Check if MDD was caused by a VF */
2389 	for (int i = 0; i < pf->num_vfs; i++) {
2390 		vf = &(pf->vfs[i]);
2391 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2392 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2393 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2394 			vp_mdet_num = i;
2395 			vf->num_mdd_events++;
2396 			vf_mdd_detected = true;
2397 		}
2398 	}
2399 
2400 	/* Print out an error message */
2401 	if (vf_mdd_detected && pf_mdd_detected)
2402 		device_printf(dev,
2403 		    "Malicious Driver Detection event %d"
2404 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2405 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2406 	else if (vf_mdd_detected && !pf_mdd_detected)
2407 		device_printf(dev,
2408 		    "Malicious Driver Detection event %d"
2409 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2410 		    event, queue, pf_num, vf_num, vp_mdet_num);
2411 	else if (!vf_mdd_detected && pf_mdd_detected)
2412 		device_printf(dev,
2413 		    "Malicious Driver Detection event %d"
2414 		    " on TX queue %d, pf number %d (PF-%d)\n",
2415 		    event, queue, pf_num, pf_mdet_num);
2416 	/* Theoretically shouldn't happen */
2417 	else
2418 		device_printf(dev,
2419 		    "TX Malicious Driver Detection event (unknown)\n");
2420 }
2421 
2422 static void
2423 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2424 {
2425 	struct i40e_hw *hw = &pf->hw;
2426 	device_t dev = pf->dev;
2427 	struct ixl_vf *vf;
2428 	bool mdd_detected = false;
2429 	bool pf_mdd_detected = false;
2430 	bool vf_mdd_detected = false;
2431 	u16 queue;
2432 	u8 pf_num, event;
2433 	u8 pf_mdet_num, vp_mdet_num;
2434 	u32 reg;
2435 
2436 	/*
2437 	 * GL_MDET_RX doesn't contain VF number information, unlike
2438 	 * GL_MDET_TX.
2439 	 */
2440 	reg = rd32(hw, I40E_GL_MDET_RX);
2441 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2442 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2443 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2444 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2445 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2446 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2447 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2448 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2449 		mdd_detected = true;
2450 	}
2451 
2452 	if (!mdd_detected)
2453 		return;
2454 
2455 	reg = rd32(hw, I40E_PF_MDET_RX);
2456 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2457 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2458 		pf_mdet_num = hw->pf_id;
2459 		pf_mdd_detected = true;
2460 	}
2461 
2462 	/* Check if MDD was caused by a VF */
2463 	for (int i = 0; i < pf->num_vfs; i++) {
2464 		vf = &(pf->vfs[i]);
2465 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2466 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2467 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2468 			vp_mdet_num = i;
2469 			vf->num_mdd_events++;
2470 			vf_mdd_detected = true;
2471 		}
2472 	}
2473 
2474 	/* Print out an error message */
2475 	if (vf_mdd_detected && pf_mdd_detected)
2476 		device_printf(dev,
2477 		    "Malicious Driver Detection event %d"
2478 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2479 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2480 	else if (vf_mdd_detected && !pf_mdd_detected)
2481 		device_printf(dev,
2482 		    "Malicious Driver Detection event %d"
2483 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2484 		    event, queue, pf_num, vp_mdet_num);
2485 	else if (!vf_mdd_detected && pf_mdd_detected)
2486 		device_printf(dev,
2487 		    "Malicious Driver Detection event %d"
2488 		    " on RX queue %d, pf number %d (PF-%d)\n",
2489 		    event, queue, pf_num, pf_mdet_num);
2490 	/* Theoretically shouldn't happen */
2491 	else
2492 		device_printf(dev,
2493 		    "RX Malicious Driver Detection event (unknown)\n");
2494 }
2495 
2496 /**
2497  * ixl_handle_mdd_event
2498  *
2499  * Called from the interrupt handler to identify possibly malicious VFs
2500  * (but it also detects events from the PF)
2501  **/
2502 void
2503 ixl_handle_mdd_event(struct ixl_pf *pf)
2504 {
2505 	struct i40e_hw *hw = &pf->hw;
2506 	u32 reg;
2507 
2508 	/*
2509 	 * Handle both TX/RX because it's possible they could
2510 	 * both trigger in the same interrupt.
2511 	 */
2512 	ixl_handle_tx_mdd_event(pf);
2513 	ixl_handle_rx_mdd_event(pf);
2514 
2515 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2516 
2517 	/* re-enable mdd interrupt cause */
2518 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2519 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2520 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2521 	ixl_flush(hw);
2522 }
2523 
2524 void
2525 ixl_enable_intr(struct ixl_vsi *vsi)
2526 {
2527 	struct i40e_hw		*hw = vsi->hw;
2528 	struct ixl_rx_queue	*que = vsi->rx_queues;
2529 
2530 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2531 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2532 			ixl_enable_queue(hw, que->rxr.me);
2533 	} else
2534 		ixl_enable_intr0(hw);
2535 }
2536 
2537 void
2538 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2539 {
2540 	struct i40e_hw		*hw = vsi->hw;
2541 	struct ixl_rx_queue	*que = vsi->rx_queues;
2542 
2543 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 		ixl_disable_queue(hw, que->rxr.me);
2545 }
2546 
2547 void
2548 ixl_enable_intr0(struct i40e_hw *hw)
2549 {
2550 	u32		reg;
2551 
2552 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2553 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2554 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2555 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2556 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2557 }
2558 
2559 void
2560 ixl_disable_intr0(struct i40e_hw *hw)
2561 {
2562 	u32		reg;
2563 
2564 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2565 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2566 	ixl_flush(hw);
2567 }
2568 
2569 void
2570 ixl_enable_queue(struct i40e_hw *hw, int id)
2571 {
2572 	u32		reg;
2573 
2574 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2575 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2576 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2577 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2578 }
2579 
2580 void
2581 ixl_disable_queue(struct i40e_hw *hw, int id)
2582 {
2583 	u32		reg;
2584 
2585 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2586 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2587 }
2588 
2589 void
2590 ixl_update_stats_counters(struct ixl_pf *pf)
2591 {
2592 	struct i40e_hw	*hw = &pf->hw;
2593 	struct ixl_vsi	*vsi = &pf->vsi;
2594 	struct ixl_vf	*vf;
2595 
2596 	struct i40e_hw_port_stats *nsd = &pf->stats;
2597 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2598 
2599 	/* Update hw stats */
2600 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2601 			   pf->stat_offsets_loaded,
2602 			   &osd->crc_errors, &nsd->crc_errors);
2603 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2604 			   pf->stat_offsets_loaded,
2605 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2606 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2607 			   I40E_GLPRT_GORCL(hw->port),
2608 			   pf->stat_offsets_loaded,
2609 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2610 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2611 			   I40E_GLPRT_GOTCL(hw->port),
2612 			   pf->stat_offsets_loaded,
2613 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2614 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2615 			   pf->stat_offsets_loaded,
2616 			   &osd->eth.rx_discards,
2617 			   &nsd->eth.rx_discards);
2618 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2619 			   I40E_GLPRT_UPRCL(hw->port),
2620 			   pf->stat_offsets_loaded,
2621 			   &osd->eth.rx_unicast,
2622 			   &nsd->eth.rx_unicast);
2623 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2624 			   I40E_GLPRT_UPTCL(hw->port),
2625 			   pf->stat_offsets_loaded,
2626 			   &osd->eth.tx_unicast,
2627 			   &nsd->eth.tx_unicast);
2628 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2629 			   I40E_GLPRT_MPRCL(hw->port),
2630 			   pf->stat_offsets_loaded,
2631 			   &osd->eth.rx_multicast,
2632 			   &nsd->eth.rx_multicast);
2633 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2634 			   I40E_GLPRT_MPTCL(hw->port),
2635 			   pf->stat_offsets_loaded,
2636 			   &osd->eth.tx_multicast,
2637 			   &nsd->eth.tx_multicast);
2638 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2639 			   I40E_GLPRT_BPRCL(hw->port),
2640 			   pf->stat_offsets_loaded,
2641 			   &osd->eth.rx_broadcast,
2642 			   &nsd->eth.rx_broadcast);
2643 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2644 			   I40E_GLPRT_BPTCL(hw->port),
2645 			   pf->stat_offsets_loaded,
2646 			   &osd->eth.tx_broadcast,
2647 			   &nsd->eth.tx_broadcast);
2648 
2649 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2650 			   pf->stat_offsets_loaded,
2651 			   &osd->tx_dropped_link_down,
2652 			   &nsd->tx_dropped_link_down);
2653 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2654 			   pf->stat_offsets_loaded,
2655 			   &osd->mac_local_faults,
2656 			   &nsd->mac_local_faults);
2657 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2658 			   pf->stat_offsets_loaded,
2659 			   &osd->mac_remote_faults,
2660 			   &nsd->mac_remote_faults);
2661 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2662 			   pf->stat_offsets_loaded,
2663 			   &osd->rx_length_errors,
2664 			   &nsd->rx_length_errors);
2665 
2666 	/* Flow control (LFC) stats */
2667 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2668 			   pf->stat_offsets_loaded,
2669 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2670 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2671 			   pf->stat_offsets_loaded,
2672 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2673 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2674 			   pf->stat_offsets_loaded,
2675 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2676 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2677 			   pf->stat_offsets_loaded,
2678 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2679 
2680 	/* Packet size stats rx */
2681 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2682 			   I40E_GLPRT_PRC64L(hw->port),
2683 			   pf->stat_offsets_loaded,
2684 			   &osd->rx_size_64, &nsd->rx_size_64);
2685 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2686 			   I40E_GLPRT_PRC127L(hw->port),
2687 			   pf->stat_offsets_loaded,
2688 			   &osd->rx_size_127, &nsd->rx_size_127);
2689 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2690 			   I40E_GLPRT_PRC255L(hw->port),
2691 			   pf->stat_offsets_loaded,
2692 			   &osd->rx_size_255, &nsd->rx_size_255);
2693 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2694 			   I40E_GLPRT_PRC511L(hw->port),
2695 			   pf->stat_offsets_loaded,
2696 			   &osd->rx_size_511, &nsd->rx_size_511);
2697 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2698 			   I40E_GLPRT_PRC1023L(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2701 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2702 			   I40E_GLPRT_PRC1522L(hw->port),
2703 			   pf->stat_offsets_loaded,
2704 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2705 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2706 			   I40E_GLPRT_PRC9522L(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->rx_size_big, &nsd->rx_size_big);
2709 
2710 	/* Packet size stats tx */
2711 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2712 			   I40E_GLPRT_PTC64L(hw->port),
2713 			   pf->stat_offsets_loaded,
2714 			   &osd->tx_size_64, &nsd->tx_size_64);
2715 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2716 			   I40E_GLPRT_PTC127L(hw->port),
2717 			   pf->stat_offsets_loaded,
2718 			   &osd->tx_size_127, &nsd->tx_size_127);
2719 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2720 			   I40E_GLPRT_PTC255L(hw->port),
2721 			   pf->stat_offsets_loaded,
2722 			   &osd->tx_size_255, &nsd->tx_size_255);
2723 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2724 			   I40E_GLPRT_PTC511L(hw->port),
2725 			   pf->stat_offsets_loaded,
2726 			   &osd->tx_size_511, &nsd->tx_size_511);
2727 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2728 			   I40E_GLPRT_PTC1023L(hw->port),
2729 			   pf->stat_offsets_loaded,
2730 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2731 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2732 			   I40E_GLPRT_PTC1522L(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2735 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2736 			   I40E_GLPRT_PTC9522L(hw->port),
2737 			   pf->stat_offsets_loaded,
2738 			   &osd->tx_size_big, &nsd->tx_size_big);
2739 
2740 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2741 			   pf->stat_offsets_loaded,
2742 			   &osd->rx_undersize, &nsd->rx_undersize);
2743 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2744 			   pf->stat_offsets_loaded,
2745 			   &osd->rx_fragments, &nsd->rx_fragments);
2746 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2747 			   pf->stat_offsets_loaded,
2748 			   &osd->rx_oversize, &nsd->rx_oversize);
2749 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2750 			   pf->stat_offsets_loaded,
2751 			   &osd->rx_jabber, &nsd->rx_jabber);
2752 	pf->stat_offsets_loaded = true;
2753 	/* End hw stats */
2754 
2755 	/* Update vsi stats */
2756 	ixl_update_vsi_stats(vsi);
2757 
2758 	for (int i = 0; i < pf->num_vfs; i++) {
2759 		vf = &pf->vfs[i];
2760 		if (vf->vf_flags & VF_FLAG_ENABLED)
2761 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2762 	}
2763 }
2764 
2765 int
2766 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2767 {
2768 	struct i40e_hw *hw = &pf->hw;
2769 	device_t dev = pf->dev;
2770 	int error = 0;
2771 
2772 	error = i40e_shutdown_lan_hmc(hw);
2773 	if (error)
2774 		device_printf(dev,
2775 		    "Shutdown LAN HMC failed with code %d\n", error);
2776 
2777 	ixl_disable_intr0(hw);
2778 
2779 	error = i40e_shutdown_adminq(hw);
2780 	if (error)
2781 		device_printf(dev,
2782 		    "Shutdown Admin queue failed with code %d\n", error);
2783 
2784 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2785 	return (error);
2786 }
2787 
2788 int
2789 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2790 {
2791 	struct i40e_hw *hw = &pf->hw;
2792 	struct ixl_vsi *vsi = &pf->vsi;
2793 	device_t dev = pf->dev;
2794 	int error = 0;
2795 
2796 	device_printf(dev, "Rebuilding driver state...\n");
2797 
2798 	error = i40e_pf_reset(hw);
2799 	if (error) {
2800 		device_printf(dev, "PF reset failure %s\n",
2801 		    i40e_stat_str(hw, error));
2802 		goto ixl_rebuild_hw_structs_after_reset_err;
2803 	}
2804 
2805 	/* Setup */
2806 	error = i40e_init_adminq(hw);
2807 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2808 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2809 		    error);
2810 		goto ixl_rebuild_hw_structs_after_reset_err;
2811 	}
2812 
2813 	i40e_clear_pxe_mode(hw);
2814 
2815 	error = ixl_get_hw_capabilities(pf);
2816 	if (error) {
2817 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2818 		goto ixl_rebuild_hw_structs_after_reset_err;
2819 	}
2820 
2821 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2822 	    hw->func_caps.num_rx_qp, 0, 0);
2823 	if (error) {
2824 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2825 		goto ixl_rebuild_hw_structs_after_reset_err;
2826 	}
2827 
2828 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2829 	if (error) {
2830 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2831 		goto ixl_rebuild_hw_structs_after_reset_err;
2832 	}
2833 
2834 	/* reserve a contiguous allocation for the PF's VSI */
2835 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2836 	if (error) {
2837 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2838 		    error);
2839 		/* TODO: error handling */
2840 	}
2841 
2842 	error = ixl_switch_config(pf);
2843 	if (error) {
2844 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2845 		     error);
2846 		error = EIO;
2847 		goto ixl_rebuild_hw_structs_after_reset_err;
2848 	}
2849 
2850 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2851 	    NULL);
2852         if (error) {
2853 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2854 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2855 		error = EIO;
2856 		goto ixl_rebuild_hw_structs_after_reset_err;
2857 	}
2858 
2859 	u8 set_fc_err_mask;
2860 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2861 	if (error) {
2862 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2863 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2864 		error = EIO;
2865 		goto ixl_rebuild_hw_structs_after_reset_err;
2866 	}
2867 
2868 	/* Remove default filters reinstalled by FW on reset */
2869 	ixl_del_default_hw_filters(vsi);
2870 
2871 	/* Determine link state */
2872 	if (ixl_attach_get_link_status(pf)) {
2873 		error = EINVAL;
2874 		/* TODO: error handling */
2875 	}
2876 
2877 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2878 	ixl_get_fw_lldp_status(pf);
2879 
2880 	/* Keep admin queue interrupts active while driver is loaded */
2881 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2882  		ixl_configure_intr0_msix(pf);
2883  		ixl_enable_intr0(hw);
2884 	}
2885 
2886 	device_printf(dev, "Rebuilding driver state done.\n");
2887 	return (0);
2888 
2889 ixl_rebuild_hw_structs_after_reset_err:
2890 	device_printf(dev, "Reload the driver to recover\n");
2891 	return (error);
2892 }
2893 
2894 void
2895 ixl_handle_empr_reset(struct ixl_pf *pf)
2896 {
2897 	struct ixl_vsi	*vsi = &pf->vsi;
2898 	struct i40e_hw	*hw = &pf->hw;
2899 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2900 	int count = 0;
2901 	u32 reg;
2902 
2903 	ixl_prepare_for_reset(pf, is_up);
2904 
2905 	/* Typically finishes within 3-4 seconds */
2906 	while (count++ < 100) {
2907 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2908 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2909 		if (reg)
2910 			i40e_msec_delay(100);
2911 		else
2912 			break;
2913 	}
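	/* Worst case the loop above polls 100 times at 100 ms, i.e. ~10 s. */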
2914 	ixl_dbg(pf, IXL_DBG_INFO,
2915 			"Reset wait count: %d\n", count);
2916 
2917 	ixl_rebuild_hw_structs_after_reset(pf);
2918 
2919 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2920 }
2921 
2922 /**
2923  * Update VSI-specific ethernet statistics counters.
2924  **/
2925 void
2926 ixl_update_eth_stats(struct ixl_vsi *vsi)
2927 {
2928 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2929 	struct i40e_hw *hw = &pf->hw;
2930 	struct i40e_eth_stats *es;
2931 	struct i40e_eth_stats *oes;
2932 	struct i40e_hw_port_stats *nsd;
2933 	u16 stat_idx = vsi->info.stat_counter_idx;
2934 
2935 	es = &vsi->eth_stats;
2936 	oes = &vsi->eth_stats_offsets;
2937 	nsd = &pf->stats;
2938 
2939 	/* Gather up the stats that the hw collects */
2940 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2941 			   vsi->stat_offsets_loaded,
2942 			   &oes->tx_errors, &es->tx_errors);
2943 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2944 			   vsi->stat_offsets_loaded,
2945 			   &oes->rx_discards, &es->rx_discards);
2946 
2947 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2948 			   I40E_GLV_GORCL(stat_idx),
2949 			   vsi->stat_offsets_loaded,
2950 			   &oes->rx_bytes, &es->rx_bytes);
2951 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2952 			   I40E_GLV_UPRCL(stat_idx),
2953 			   vsi->stat_offsets_loaded,
2954 			   &oes->rx_unicast, &es->rx_unicast);
2955 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2956 			   I40E_GLV_MPRCL(stat_idx),
2957 			   vsi->stat_offsets_loaded,
2958 			   &oes->rx_multicast, &es->rx_multicast);
2959 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2960 			   I40E_GLV_BPRCL(stat_idx),
2961 			   vsi->stat_offsets_loaded,
2962 			   &oes->rx_broadcast, &es->rx_broadcast);
2963 
2964 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2965 			   I40E_GLV_GOTCL(stat_idx),
2966 			   vsi->stat_offsets_loaded,
2967 			   &oes->tx_bytes, &es->tx_bytes);
2968 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2969 			   I40E_GLV_UPTCL(stat_idx),
2970 			   vsi->stat_offsets_loaded,
2971 			   &oes->tx_unicast, &es->tx_unicast);
2972 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2973 			   I40E_GLV_MPTCL(stat_idx),
2974 			   vsi->stat_offsets_loaded,
2975 			   &oes->tx_multicast, &es->tx_multicast);
2976 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2977 			   I40E_GLV_BPTCL(stat_idx),
2978 			   vsi->stat_offsets_loaded,
2979 			   &oes->tx_broadcast, &es->tx_broadcast);
2980 	vsi->stat_offsets_loaded = true;
2981 }
2982 
2983 void
2984 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2985 {
2986 	struct ixl_pf		*pf;
2987 	struct ifnet		*ifp;
2988 	struct i40e_eth_stats	*es;
2989 	u64			tx_discards;
2990 
2991 	struct i40e_hw_port_stats *nsd;
2992 
2993 	pf = vsi->back;
2994 	ifp = vsi->ifp;
2995 	es = &vsi->eth_stats;
2996 	nsd = &pf->stats;
2997 
2998 	ixl_update_eth_stats(vsi);
2999 
3000 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3001 
3002 	/* Update ifnet stats */
3003 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3004 	                   es->rx_multicast +
3005 			   es->rx_broadcast);
3006 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3007 	                   es->tx_multicast +
3008 			   es->tx_broadcast);
3009 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3010 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3011 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3012 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3013 
3014 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3015 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3016 	    nsd->rx_jabber);
3017 	IXL_SET_OERRORS(vsi, es->tx_errors);
3018 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3019 	IXL_SET_OQDROPS(vsi, tx_discards);
3020 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3021 	IXL_SET_COLLISIONS(vsi, 0);
3022 }
3023 
3024 /**
3025  * Reset all of the stats for the given pf
3026  **/
3027 void
3028 ixl_pf_reset_stats(struct ixl_pf *pf)
3029 {
3030 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3031 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3032 	pf->stat_offsets_loaded = false;
3033 }
3034 
3035 /**
3036  * Resets all stats of the given vsi
3037  **/
3038 void
3039 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3040 {
3041 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3042 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3043 	vsi->stat_offsets_loaded = false;
3044 }
3045 
3046 /**
3047  * Read and update a 48 bit stat from the hw
3048  *
3049  * Since the device stats are not reset at PFReset, they likely will not
3050  * be zeroed when the driver starts.  We'll save the first values read
3051  * and use them as offsets to be subtracted from the raw values in order
3052  * to report stats that count from zero.
3053  **/
3054 void
3055 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3056 	bool offset_loaded, u64 *offset, u64 *stat)
3057 {
3058 	u64 new_data;
3059 
3060 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3061 	new_data = rd64(hw, loreg);
3062 #else
3063 	/*
3064 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3065 	 * 10 don't support 64-bit bus reads/writes.
3066 	 */
3067 	new_data = rd32(hw, loreg);
3068 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3069 #endif
3070 
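	/*
	 * Example of the wrap handling below: if *offset == 0xFFFFFFFFFFF0
	 * and the 48-bit counter has rolled over to 0x10, then new_data is
	 * below the offset and the reported delta is
	 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.
	 */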
3071 	if (!offset_loaded)
3072 		*offset = new_data;
3073 	if (new_data >= *offset)
3074 		*stat = new_data - *offset;
3075 	else
3076 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3077 	*stat &= 0xFFFFFFFFFFFFULL;
3078 }
3079 
3080 /**
3081  * Read and update a 32 bit stat from the hw
3082  **/
3083 void
3084 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3085 	bool offset_loaded, u64 *offset, u64 *stat)
3086 {
3087 	u32 new_data;
3088 
3089 	new_data = rd32(hw, reg);
3090 	if (!offset_loaded)
3091 		*offset = new_data;
3092 	if (new_data >= *offset)
3093 		*stat = (u32)(new_data - *offset);
3094 	else
3095 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3096 }
3097 
3098 void
3099 ixl_add_device_sysctls(struct ixl_pf *pf)
3100 {
3101 	device_t dev = pf->dev;
3102 	struct i40e_hw *hw = &pf->hw;
3103 
3104 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3105 	struct sysctl_oid_list *ctx_list =
3106 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3107 
3108 	struct sysctl_oid *debug_node;
3109 	struct sysctl_oid_list *debug_list;
3110 
3111 	struct sysctl_oid *fec_node;
3112 	struct sysctl_oid_list *fec_list;
3113 
3114 	/* Set up sysctls */
3115 	SYSCTL_ADD_PROC(ctx, ctx_list,
3116 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3117 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3118 
3119 	SYSCTL_ADD_PROC(ctx, ctx_list,
3120 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3121 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3122 
3123 	SYSCTL_ADD_PROC(ctx, ctx_list,
3124 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3125 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3126 
3127 	SYSCTL_ADD_PROC(ctx, ctx_list,
3128 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3129 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3130 
3131 	SYSCTL_ADD_PROC(ctx, ctx_list,
3132 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3133 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3134 
3135 	SYSCTL_ADD_PROC(ctx, ctx_list,
3136 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3137 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3138 	    "Queues not allocated to a PF or VF");
3139 
3140 	SYSCTL_ADD_PROC(ctx, ctx_list,
3141 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3142 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3143 	    "Immediately set TX ITR value for all queues");
3144 
3145 	SYSCTL_ADD_PROC(ctx, ctx_list,
3146 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3147 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3148 	    "Immediately set RX ITR value for all queues");
3149 
3150 	SYSCTL_ADD_INT(ctx, ctx_list,
3151 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3152 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3153 
3154 	SYSCTL_ADD_INT(ctx, ctx_list,
3155 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3156 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3157 
3158 	/* Add FEC sysctls for 25G adapters */
3159 	if (i40e_is_25G_device(hw->device_id)) {
3160 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3161 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3162 		fec_list = SYSCTL_CHILDREN(fec_node);
3163 
3164 		SYSCTL_ADD_PROC(ctx, fec_list,
3165 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3166 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3167 
3168 		SYSCTL_ADD_PROC(ctx, fec_list,
3169 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3170 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3171 
3172 		SYSCTL_ADD_PROC(ctx, fec_list,
3173 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3174 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3175 
3176 		SYSCTL_ADD_PROC(ctx, fec_list,
3177 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3178 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3179 
3180 		SYSCTL_ADD_PROC(ctx, fec_list,
3181 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3182 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3183 	}
3184 
3185 	SYSCTL_ADD_PROC(ctx, ctx_list,
3186 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3187 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3188 
3189 	/* Add sysctls meant to print debug information, but don't list them
3190 	 * in "sysctl -a" output. */
3191 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3192 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3193 	debug_list = SYSCTL_CHILDREN(debug_node);
3194 
3195 	SYSCTL_ADD_UINT(ctx, debug_list,
3196 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3197 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3198 
3199 	SYSCTL_ADD_UINT(ctx, debug_list,
3200 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3201 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3202 
3203 	SYSCTL_ADD_PROC(ctx, debug_list,
3204 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3205 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3206 
3207 	SYSCTL_ADD_PROC(ctx, debug_list,
3208 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3209 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3210 
3211 	SYSCTL_ADD_PROC(ctx, debug_list,
3212 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3213 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3214 
3215 	SYSCTL_ADD_PROC(ctx, debug_list,
3216 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3217 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3218 
3219 	SYSCTL_ADD_PROC(ctx, debug_list,
3220 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3221 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3222 
3223 	SYSCTL_ADD_PROC(ctx, debug_list,
3224 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3225 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3226 
3227 	SYSCTL_ADD_PROC(ctx, debug_list,
3228 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3229 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3230 
3231 	SYSCTL_ADD_PROC(ctx, debug_list,
3232 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3233 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3234 
3235 	SYSCTL_ADD_PROC(ctx, debug_list,
3236 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3237 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3238 
3239 	SYSCTL_ADD_PROC(ctx, debug_list,
3240 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3241 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3242 
3243 	SYSCTL_ADD_PROC(ctx, debug_list,
3244 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3245 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3246 
3247 	SYSCTL_ADD_PROC(ctx, debug_list,
3248 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3249 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3250 
3251 	SYSCTL_ADD_PROC(ctx, debug_list,
3252 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3253 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3254 
3255 	SYSCTL_ADD_PROC(ctx, debug_list,
3256 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3258 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3259 
3260 	SYSCTL_ADD_PROC(ctx, debug_list,
3261 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3262 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3263 
3264 	if (pf->has_i2c) {
3265 		SYSCTL_ADD_PROC(ctx, debug_list,
3266 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3267 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3268 
3269 		SYSCTL_ADD_PROC(ctx, debug_list,
3270 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3271 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3272 
3273 		SYSCTL_ADD_PROC(ctx, debug_list,
3274 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3275 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3276 	}
3277 }
3278 
3279 /*
3280  * Primarily for finding out, at runtime, how many queues can be
3281  * assigned to VFs.
3282  */
3283 static int
3284 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3285 {
3286 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3287 	int queues;
3288 
3289 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3290 
3291 	return sysctl_handle_int(oidp, NULL, queues, req);
3292 }
3293 
3294 /*
3295 ** Set flow control using sysctl:
3296 ** 	0 - off
3297 **	1 - rx pause
3298 **	2 - tx pause
3299 **	3 - full
3300 */
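/*
 * For example (assuming the first ixl(4) unit), "sysctl dev.ixl.0.fc=3"
 * would request full RX/TX pause via i40e_set_fc() below.
 */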
3301 int
3302 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3303 {
3304 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3305 	struct i40e_hw *hw = &pf->hw;
3306 	device_t dev = pf->dev;
3307 	int requested_fc, error = 0;
3308 	enum i40e_status_code aq_error = 0;
3309 	u8 fc_aq_err = 0;
3310 
3311 	/* Get request */
3312 	requested_fc = pf->fc;
3313 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3314 	if ((error) || (req->newptr == NULL))
3315 		return (error);
3316 	if (requested_fc < 0 || requested_fc > 3) {
3317 		device_printf(dev,
3318 		    "Invalid fc mode; valid modes are 0 through 3\n");
3319 		return (EINVAL);
3320 	}
3321 
3322 	/* Set fc ability for port */
3323 	hw->fc.requested_mode = requested_fc;
3324 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3325 	if (aq_error) {
3326 		device_printf(dev,
3327 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3328 		    __func__, aq_error, fc_aq_err);
3329 		return (EIO);
3330 	}
3331 	pf->fc = requested_fc;
3332 
3333 	return (0);
3334 }
3335 
3336 char *
3337 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3338 {
3339 	int index;
3340 
3341 	char *speeds[] = {
3342 		"Unknown",
3343 		"100 Mbps",
3344 		"1 Gbps",
3345 		"10 Gbps",
3346 		"40 Gbps",
3347 		"20 Gbps",
3348 		"25 Gbps",
3349 	};
3350 
3351 	switch (link_speed) {
3352 	case I40E_LINK_SPEED_100MB:
3353 		index = 1;
3354 		break;
3355 	case I40E_LINK_SPEED_1GB:
3356 		index = 2;
3357 		break;
3358 	case I40E_LINK_SPEED_10GB:
3359 		index = 3;
3360 		break;
3361 	case I40E_LINK_SPEED_40GB:
3362 		index = 4;
3363 		break;
3364 	case I40E_LINK_SPEED_20GB:
3365 		index = 5;
3366 		break;
3367 	case I40E_LINK_SPEED_25GB:
3368 		index = 6;
3369 		break;
3370 	case I40E_LINK_SPEED_UNKNOWN:
3371 	default:
3372 		index = 0;
3373 		break;
3374 	}
3375 
3376 	return speeds[index];
3377 }
3378 
3379 int
3380 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3381 {
3382 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3383 	struct i40e_hw *hw = &pf->hw;
3384 	int error = 0;
3385 
3386 	ixl_update_link_status(pf);
3387 
3388 	error = sysctl_handle_string(oidp,
3389 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3390 	    8, req);
3391 	return (error);
3392 }
3393 
3394 /*
3395  * Converts an 8-bit speeds value between sysctl flags and
3396  * Admin Queue flags.
3397  */
3398 static u8
3399 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3400 {
3401 	static u16 speedmap[6] = {
3402 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3403 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3404 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3405 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3406 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3407 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3408 	};
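	/*
	 * Each speedmap entry packs the AQ speed flag into the low byte and
	 * the corresponding sysctl flag into the high byte; the loop below
	 * tests one encoding and accumulates the other.
	 */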
3409 	u8 retval = 0;
3410 
3411 	for (int i = 0; i < 6; i++) {
3412 		if (to_aq)
3413 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3414 		else
3415 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3416 	}
3417 
3418 	return (retval);
3419 }
3420 
3421 int
3422 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3423 {
3424 	struct i40e_hw *hw = &pf->hw;
3425 	device_t dev = pf->dev;
3426 	struct i40e_aq_get_phy_abilities_resp abilities;
3427 	struct i40e_aq_set_phy_config config;
3428 	enum i40e_status_code aq_error = 0;
3429 
3430 	/* Get current capability information */
3431 	aq_error = i40e_aq_get_phy_capabilities(hw,
3432 	    FALSE, FALSE, &abilities, NULL);
3433 	if (aq_error) {
3434 		device_printf(dev,
3435 		    "%s: Error getting phy capabilities %d,"
3436 		    " aq error: %d\n", __func__, aq_error,
3437 		    hw->aq.asq_last_status);
3438 		return (EIO);
3439 	}
3440 
3441 	/* Prepare new config */
3442 	bzero(&config, sizeof(config));
3443 	if (from_aq)
3444 		config.link_speed = speeds;
3445 	else
3446 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3447 	config.phy_type = abilities.phy_type;
3448 	config.phy_type_ext = abilities.phy_type_ext;
3449 	config.abilities = abilities.abilities
3450 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3451 	config.eee_capability = abilities.eee_capability;
3452 	config.eeer = abilities.eeer_val;
3453 	config.low_power_ctrl = abilities.d3_lpan;
3454 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3455 
3456 	/* Do aq command & restart link */
3457 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3458 	if (aq_error) {
3459 		device_printf(dev,
3460 		    "%s: Error setting new phy config %d,"
3461 		    " aq error: %d\n", __func__, aq_error,
3462 		    hw->aq.asq_last_status);
3463 		return (EIO);
3464 	}
3465 
3466 	return (0);
3467 }
3468 
3469 /*
** Supported link speeds:
3471 **	Flags:
3472 **	 0x1 - 100 Mb
3473 **	 0x2 - 1G
3474 **	 0x4 - 10G
3475 **	 0x8 - 20G
3476 **	0x10 - 25G
3477 **	0x20 - 40G
3478 */
3479 static int
3480 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3481 {
3482 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3483 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3484 
3485 	return sysctl_handle_int(oidp, NULL, supported, req);
3486 }
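
/*
 * Example: a port whose PHY supports 1G, 10G and 40G reports
 * 0x2 | 0x4 | 0x20 == 0x26 through this read-only sysctl.
 */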
3487 
3488 /*
3489 ** Control link advertise speed:
3490 **	Flags:
3491 **	 0x1 - advertise 100 Mb
3492 **	 0x2 - advertise 1G
3493 **	 0x4 - advertise 10G
3494 **	 0x8 - advertise 20G
3495 **	0x10 - advertise 25G
3496 **	0x20 - advertise 40G
3497 **
3498 **	Set to 0 to disable link
3499 */
3500 int
3501 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3502 {
3503 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3504 	device_t dev = pf->dev;
3505 	u8 converted_speeds;
3506 	int requested_ls = 0;
3507 	int error = 0;
3508 
3509 	/* Read in new mode */
3510 	requested_ls = pf->advertised_speed;
3511 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3512 	if ((error) || (req->newptr == NULL))
3513 		return (error);
3514 
3515 	/* Error out if bits outside of possible flag range are set */
3516 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3517 		device_printf(dev, "Input advertised speed out of range; "
3518 		    "valid flags are: 0x%02x\n",
3519 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3520 		return (EINVAL);
3521 	}
3522 
3523 	/* Check if adapter supports input value */
3524 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3525 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3526 		device_printf(dev, "Invalid advertised speed; "
3527 		    "valid flags are: 0x%02x\n",
3528 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3529 		return (EINVAL);
3530 	}
3531 
3532 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3533 	if (error)
3534 		return (error);
3535 
3536 	pf->advertised_speed = requested_ls;
3537 	ixl_update_link_status(pf);
3538 	return (0);
3539 }
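
/*
 * Usage sketch (illustrative node name and unit number): advertise
 * only 10G by writing the 0x4 flag:
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x4
 *
 * Flags outside the adapter's supported-speeds mask fail with EINVAL;
 * on success the PHY config is rewritten and the link restarted.
 */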
3540 
3541 /*
3542 ** Get the width and transaction speed of
3543 ** the bus this adapter is plugged into.
3544 */
3545 void
3546 ixl_get_bus_info(struct ixl_pf *pf)
3547 {
3548 	struct i40e_hw *hw = &pf->hw;
3549 	device_t dev = pf->dev;
	u16 link;
	u32 offset, num_ports;
3552 	u64 max_speed;
3553 
3554 	/* Some devices don't use PCIE */
3555 	if (hw->mac.type == I40E_MAC_X722)
3556 		return;
3557 
	/* Read PCI Express Capabilities Link Status Register */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3561 
3562 	/* Fill out hw struct with PCIE info */
3563 	i40e_set_pci_config_data(hw, link);
3564 
3565 	/* Use info to print out bandwidth messages */
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    "Unknown");
3575 
3576 	/*
3577 	 * If adapter is in slot with maximum supported speed,
3578 	 * no warning message needs to be printed out.
3579 	 */
3580 	if (hw->bus.speed >= i40e_bus_speed_8000
3581 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3582 		return;
3583 
3584 	num_ports = bitcount32(hw->func_caps.valid_functions);
3585 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3586 
	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device may be insufficient for"
		    " optimal performance.\n");
		device_printf(dev, "Please move the device to a different"
		    " PCI-e link with more lanes and/or higher"
		    " transfer rate.\n");
	}
3595 }
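
/*
 * Worked example of the bandwidth check above, assuming the i40e bus
 * enums carry their literal rates (i40e_bus_speed_8000 == 8000,
 * i40e_bus_width_pcie_x8 == 8): a two-port 40G adapter yields
 * 2 * 40000 == 80000 against a Gen3 x8 slot's 8000 * 8 == 64000, so
 * the insufficient-bandwidth warnings are printed.
 */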
3596 
3597 static int
3598 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3599 {
3600 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3601 	struct i40e_hw	*hw = &pf->hw;
3602 	struct sbuf	*sbuf;
3603 
3604 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3605 	ixl_nvm_version_str(hw, sbuf);
3606 	sbuf_finish(sbuf);
3607 	sbuf_delete(sbuf);
3608 
3609 	return (0);
3610 }
3611 
3612 void
3613 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3614 {
	if ((nvma->command == I40E_NVM_READ) &&
	    ((nvma->config & 0xFF) == 0xF) &&
	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
	    (nvma->offset == 0) &&
	    (nvma->data_size == 1)) {
		/* "Get Driver Status" command; not logged */
	} else if (nvma->command == I40E_NVM_READ) {
		/* Plain NVM reads are frequent; not logged either */
	} else {
3626 		switch (nvma->command) {
3627 		case 0xB:
3628 			device_printf(dev, "- command: I40E_NVM_READ\n");
3629 			break;
3630 		case 0xC:
3631 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3632 			break;
3633 		default:
3634 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3635 			break;
3636 		}
3637 
3638 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3639 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3640 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3641 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3642 	}
3643 }
3644 
3645 int
3646 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3647 {
3648 	struct i40e_hw *hw = &pf->hw;
3649 	struct i40e_nvm_access *nvma;
3650 	device_t dev = pf->dev;
3651 	enum i40e_status_code status = 0;
3652 	size_t nvma_size, ifd_len, exp_len;
3653 	int err, perrno;
3654 
3655 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3656 
3657 	/* Sanity checks */
3658 	nvma_size = sizeof(struct i40e_nvm_access);
3659 	ifd_len = ifd->ifd_len;
3660 
3661 	if (ifd_len < nvma_size ||
3662 	    ifd->ifd_data == NULL) {
3663 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3664 		    __func__);
3665 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3666 		    __func__, ifd_len, nvma_size);
3667 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3668 		    ifd->ifd_data);
3669 		return (EINVAL);
3670 	}
3671 
3672 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3673 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3674 	if (err) {
3675 		device_printf(dev, "%s: Cannot get request from user space\n",
3676 		    __func__);
3677 		free(nvma, M_DEVBUF);
3678 		return (err);
3679 	}
3680 
3681 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3682 		ixl_print_nvm_cmd(dev, nvma);
3683 
3684 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3685 		int count = 0;
3686 		while (count++ < 100) {
3687 			i40e_msec_delay(100);
3688 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3689 				break;
3690 		}
3691 	}
3692 
3693 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3694 		free(nvma, M_DEVBUF);
3695 		return (-EBUSY);
3696 	}
3697 
3698 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3699 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3700 		    __func__);
3701 		free(nvma, M_DEVBUF);
3702 		return (EINVAL);
3703 	}
3704 
3705 	/*
3706 	 * Older versions of the NVM update tool don't set ifd_len to the size
3707 	 * of the entire buffer passed to the ioctl. Check the data_size field
3708 	 * in the contained i40e_nvm_access struct and ensure everything is
3709 	 * copied in from userspace.
3710 	 */
3711 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
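	/*
	 * Worked example: with data_size == 512, exp_len is
	 * sizeof(struct i40e_nvm_access) + 511, since the struct's own
	 * trailing one-byte data member already covers the first byte.
	 */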
3712 
3713 	if (ifd_len < exp_len) {
3714 		ifd_len = exp_len;
3715 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3716 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3717 		if (err) {
3718 			device_printf(dev, "%s: Cannot get request from user space\n",
3719 					__func__);
3720 			free(nvma, M_DEVBUF);
3721 			return (err);
3722 		}
3723 	}
3724 
3725 	// TODO: Might need a different lock here
3726 	// IXL_PF_LOCK(pf);
3727 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3728 	// IXL_PF_UNLOCK(pf);
3729 
3730 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3731 	free(nvma, M_DEVBUF);
3732 	if (err) {
3733 		device_printf(dev, "%s: Cannot return data to user space\n",
3734 				__func__);
3735 		return (err);
3736 	}
3737 
3738 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3739 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3740 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3741 		    i40e_stat_str(hw, status), perrno);
3742 
3743 	/*
3744 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3745 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3746 	 */
3747 	if (perrno == -EPERM)
3748 		return (-EACCES);
3749 	else
3750 		return (perrno);
3751 }
3752 
3753 int
3754 ixl_find_i2c_interface(struct ixl_pf *pf)
3755 {
3756 	struct i40e_hw *hw = &pf->hw;
3757 	bool i2c_en, port_matched;
3758 	u32 reg;
3759 
3760 	for (int i = 0; i < 4; i++) {
3761 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3762 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3763 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3764 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3765 		    & BIT(hw->port);
3766 		if (i2c_en && port_matched)
3767 			return (i);
3768 	}
3769 
3770 	return (-1);
3771 }
3772 
3773 static char *
3774 ixl_phy_type_string(u32 bit_pos, bool ext)
3775 {
3776 	static char * phy_types_str[32] = {
3777 		"SGMII",
3778 		"1000BASE-KX",
3779 		"10GBASE-KX4",
3780 		"10GBASE-KR",
3781 		"40GBASE-KR4",
3782 		"XAUI",
3783 		"XFI",
3784 		"SFI",
3785 		"XLAUI",
3786 		"XLPPI",
3787 		"40GBASE-CR4",
3788 		"10GBASE-CR1",
3789 		"SFP+ Active DA",
3790 		"QSFP+ Active DA",
3791 		"Reserved (14)",
3792 		"Reserved (15)",
3793 		"Reserved (16)",
3794 		"100BASE-TX",
3795 		"1000BASE-T",
3796 		"10GBASE-T",
3797 		"10GBASE-SR",
3798 		"10GBASE-LR",
3799 		"10GBASE-SFP+Cu",
3800 		"10GBASE-CR1",
3801 		"40GBASE-CR4",
3802 		"40GBASE-SR4",
3803 		"40GBASE-LR4",
3804 		"1000BASE-SX",
3805 		"1000BASE-LX",
3806 		"1000BASE-T Optical",
3807 		"20GBASE-KR2",
3808 		"Reserved (31)"
3809 	};
3810 	static char * ext_phy_types_str[8] = {
3811 		"25GBASE-KR",
3812 		"25GBASE-CR",
3813 		"25GBASE-SR",
3814 		"25GBASE-LR",
3815 		"25GBASE-AOC",
3816 		"25GBASE-ACC",
3817 		"Reserved (6)",
3818 		"Reserved (7)"
3819 	};
3820 
3821 	if (ext && bit_pos > 7) return "Invalid_Ext";
3822 	if (bit_pos > 31) return "Invalid";
3823 
3824 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3825 }
3826 
/* TODO: ERJ: I don't think this is necessary anymore. */
3828 int
3829 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3830 {
3831 	device_t dev = pf->dev;
3832 	struct i40e_hw *hw = &pf->hw;
3833 	struct i40e_aq_desc desc;
3834 	enum i40e_status_code status;
3835 
3836 	struct i40e_aqc_get_link_status *aq_link_status =
3837 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3838 
3839 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3840 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3841 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3842 	if (status) {
3843 		device_printf(dev,
3844 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3845 		    __func__, i40e_stat_str(hw, status),
3846 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3847 		return (EIO);
3848 	}
3849 
3850 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3851 	return (0);
3852 }
3853 
3854 static char *
3855 ixl_phy_type_string_ls(u8 val)
3856 {
3857 	if (val >= 0x1F)
3858 		return ixl_phy_type_string(val - 0x1F, true);
3859 	else
3860 		return ixl_phy_type_string(val, false);
3861 }
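
/*
 * Example mappings: link-status PHY type values at or above 0x1F
 * index the extended table, so 0x1F resolves to "25GBASE-KR" and
 * 0x20 to "25GBASE-CR", while 0x12 resolves to "1000BASE-T" in the
 * base table.
 */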
3862 
3863 static int
3864 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3865 {
3866 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3867 	device_t dev = pf->dev;
3868 	struct sbuf *buf;
3869 	int error = 0;
3870 
3871 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3872 	if (!buf) {
3873 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3874 		return (ENOMEM);
3875 	}
3876 
3877 	struct i40e_aqc_get_link_status link_status;
3878 	error = ixl_aq_get_link_status(pf, &link_status);
3879 	if (error) {
3880 		sbuf_delete(buf);
3881 		return (error);
3882 	}
3883 
3884 	sbuf_printf(buf, "\n"
3885 	    "PHY Type : 0x%02x<%s>\n"
3886 	    "Speed    : 0x%02x\n"
3887 	    "Link info: 0x%02x\n"
3888 	    "AN info  : 0x%02x\n"
3889 	    "Ext info : 0x%02x\n"
3890 	    "Loopback : 0x%02x\n"
3891 	    "Max Frame: %d\n"
3892 	    "Config   : 0x%02x\n"
3893 	    "Power    : 0x%02x",
3894 	    link_status.phy_type,
3895 	    ixl_phy_type_string_ls(link_status.phy_type),
3896 	    link_status.link_speed,
3897 	    link_status.link_info,
3898 	    link_status.an_info,
3899 	    link_status.ext_info,
3900 	    link_status.loopback,
3901 	    link_status.max_frame_size,
3902 	    link_status.config,
3903 	    link_status.power_desc);
3904 
3905 	error = sbuf_finish(buf);
3906 	if (error)
3907 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3908 
3909 	sbuf_delete(buf);
3910 	return (error);
3911 }
3912 
3913 static int
3914 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3915 {
3916 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3917 	struct i40e_hw *hw = &pf->hw;
3918 	device_t dev = pf->dev;
3919 	enum i40e_status_code status;
3920 	struct i40e_aq_get_phy_abilities_resp abilities;
3921 	struct sbuf *buf;
3922 	int error = 0;
3923 
3924 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3925 	if (!buf) {
3926 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3927 		return (ENOMEM);
3928 	}
3929 
3930 	status = i40e_aq_get_phy_capabilities(hw,
3931 	    FALSE, FALSE, &abilities, NULL);
3932 	if (status) {
3933 		device_printf(dev,
3934 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3935 		    __func__, i40e_stat_str(hw, status),
3936 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3937 		sbuf_delete(buf);
3938 		return (EIO);
3939 	}
3940 
3941 	sbuf_printf(buf, "\n"
3942 	    "PHY Type : %08x",
3943 	    abilities.phy_type);
3944 
3945 	if (abilities.phy_type != 0) {
3946 		sbuf_printf(buf, "<");
3947 		for (int i = 0; i < 32; i++)
3948 			if ((1 << i) & abilities.phy_type)
3949 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3950 		sbuf_printf(buf, ">\n");
3951 	}
3952 
3953 	sbuf_printf(buf, "PHY Ext  : %02x",
3954 	    abilities.phy_type_ext);
3955 
3956 	if (abilities.phy_type_ext != 0) {
3957 		sbuf_printf(buf, "<");
3958 		for (int i = 0; i < 4; i++)
3959 			if ((1 << i) & abilities.phy_type_ext)
3960 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3961 		sbuf_printf(buf, ">");
3962 	}
3963 	sbuf_printf(buf, "\n");
3964 
3965 	sbuf_printf(buf,
3966 	    "Speed    : %02x\n"
3967 	    "Abilities: %02x\n"
3968 	    "EEE cap  : %04x\n"
3969 	    "EEER reg : %08x\n"
3970 	    "D3 Lpan  : %02x\n"
3971 	    "ID       : %02x %02x %02x %02x\n"
3972 	    "ModType  : %02x %02x %02x\n"
3973 	    "ModType E: %01x\n"
3974 	    "FEC Cfg  : %02x\n"
3975 	    "Ext CC   : %02x",
3976 	    abilities.link_speed,
3977 	    abilities.abilities, abilities.eee_capability,
3978 	    abilities.eeer_val, abilities.d3_lpan,
3979 	    abilities.phy_id[0], abilities.phy_id[1],
3980 	    abilities.phy_id[2], abilities.phy_id[3],
3981 	    abilities.module_type[0], abilities.module_type[1],
3982 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3983 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3984 	    abilities.ext_comp_code);
3985 
3986 	error = sbuf_finish(buf);
3987 	if (error)
3988 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3989 
3990 	sbuf_delete(buf);
3991 	return (error);
3992 }
3993 
3994 static int
3995 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3996 {
3997 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3998 	struct ixl_vsi *vsi = &pf->vsi;
3999 	struct ixl_mac_filter *f;
4000 	device_t dev = pf->dev;
4001 	int error = 0, ftl_len = 0, ftl_counter = 0;
4002 
4003 	struct sbuf *buf;
4004 
4005 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4006 	if (!buf) {
4007 		device_printf(dev, "Could not allocate sbuf for output.\n");
4008 		return (ENOMEM);
4009 	}
4010 
4011 	sbuf_printf(buf, "\n");
4012 
4013 	/* Print MAC filters */
4014 	sbuf_printf(buf, "PF Filters:\n");
4015 	SLIST_FOREACH(f, &vsi->ftl, next)
4016 		ftl_len++;
4017 
4018 	if (ftl_len < 1)
4019 		sbuf_printf(buf, "(none)\n");
4020 	else {
4021 		SLIST_FOREACH(f, &vsi->ftl, next) {
4022 			sbuf_printf(buf,
4023 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4024 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4025 			/* don't print '\n' for last entry */
4026 			if (++ftl_counter != ftl_len)
4027 				sbuf_printf(buf, "\n");
4028 		}
4029 	}
4030 
4031 #ifdef PCI_IOV
4032 	/* TODO: Give each VF its own filter list sysctl */
4033 	struct ixl_vf *vf;
4034 	if (pf->num_vfs > 0) {
4035 		sbuf_printf(buf, "\n\n");
4036 		for (int i = 0; i < pf->num_vfs; i++) {
4037 			vf = &pf->vfs[i];
4038 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4039 				continue;
4040 
4041 			vsi = &vf->vsi;
4042 			ftl_len = 0, ftl_counter = 0;
4043 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4044 			SLIST_FOREACH(f, &vsi->ftl, next)
4045 				ftl_len++;
4046 
4047 			if (ftl_len < 1)
4048 				sbuf_printf(buf, "(none)\n");
4049 			else {
4050 				SLIST_FOREACH(f, &vsi->ftl, next) {
4051 					sbuf_printf(buf,
4052 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4053 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4054 				}
4055 			}
4056 		}
4057 	}
4058 #endif
4059 
4060 	error = sbuf_finish(buf);
4061 	if (error)
4062 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4063 	sbuf_delete(buf);
4064 
4065 	return (error);
4066 }
4067 
4068 #define IXL_SW_RES_SIZE 0x14
4069 int
4070 ixl_res_alloc_cmp(const void *a, const void *b)
4071 {
4072 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4073 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4074 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4075 
4076 	return ((int)one->resource_type - (int)two->resource_type);
4077 }
4078 
4079 /*
4080  * Longest string length: 25
4081  */
4082 char *
4083 ixl_switch_res_type_string(u8 type)
4084 {
	// TODO: This should be changed to static const
	static char * ixl_switch_res_type_strings[0x14] = {
4087 		"VEB",
4088 		"VSI",
4089 		"Perfect Match MAC address",
4090 		"S-tag",
4091 		"(Reserved)",
4092 		"Multicast hash entry",
4093 		"Unicast hash entry",
4094 		"VLAN",
4095 		"VSI List entry",
4096 		"(Reserved)",
4097 		"VLAN Statistic Pool",
4098 		"Mirror Rule",
4099 		"Queue Set",
4100 		"Inner VLAN Forward filter",
4101 		"(Reserved)",
4102 		"Inner MAC",
4103 		"IP",
4104 		"GRE/VN1 Key",
4105 		"VN2 Key",
4106 		"Tunneling Port"
4107 	};
4108 
4109 	if (type < 0x14)
4110 		return ixl_switch_res_type_strings[type];
4111 	else
4112 		return "(Reserved)";
4113 }
4114 
4115 static int
4116 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4117 {
4118 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4119 	struct i40e_hw *hw = &pf->hw;
4120 	device_t dev = pf->dev;
4121 	struct sbuf *buf;
4122 	enum i40e_status_code status;
4123 	int error = 0;
4124 
4125 	u8 num_entries;
4126 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4127 
4128 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4129 	if (!buf) {
4130 		device_printf(dev, "Could not allocate sbuf for output.\n");
4131 		return (ENOMEM);
4132 	}
4133 
4134 	bzero(resp, sizeof(resp));
4135 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4136 				resp,
4137 				IXL_SW_RES_SIZE,
4138 				NULL);
4139 	if (status) {
4140 		device_printf(dev,
4141 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4142 		    __func__, i40e_stat_str(hw, status),
4143 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4144 		sbuf_delete(buf);
		return (EIO);
4146 	}
4147 
4148 	/* Sort entries by type for display */
4149 	qsort(resp, num_entries,
4150 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4151 	    &ixl_res_alloc_cmp);
4152 
4153 	sbuf_cat(buf, "\n");
4154 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4155 	sbuf_printf(buf,
4156 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4157 	    "                          | (this)     | (all) | (this) | (all)       \n");
4158 	for (int i = 0; i < num_entries; i++) {
4159 		sbuf_printf(buf,
4160 		    "%25s | %10d   %5d   %6d   %12d",
4161 		    ixl_switch_res_type_string(resp[i].resource_type),
4162 		    resp[i].guaranteed,
4163 		    resp[i].total,
4164 		    resp[i].used,
4165 		    resp[i].total_unalloced);
4166 		if (i < num_entries - 1)
4167 			sbuf_cat(buf, "\n");
4168 	}
4169 
4170 	error = sbuf_finish(buf);
4171 	if (error)
4172 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4173 
4174 	sbuf_delete(buf);
4175 	return (error);
4176 }
4177 
4178 /*
4179 ** Caller must init and delete sbuf; this function will clear and
4180 ** finish it for caller.
4181 */
4182 char *
4183 ixl_switch_element_string(struct sbuf *s,
4184     struct i40e_aqc_switch_config_element_resp *element)
4185 {
4186 	sbuf_clear(s);
4187 
4188 	switch (element->element_type) {
4189 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4190 		sbuf_printf(s, "MAC %3d", element->element_info);
4191 		break;
4192 	case I40E_AQ_SW_ELEM_TYPE_PF:
4193 		sbuf_printf(s, "PF  %3d", element->element_info);
4194 		break;
4195 	case I40E_AQ_SW_ELEM_TYPE_VF:
4196 		sbuf_printf(s, "VF  %3d", element->element_info);
4197 		break;
4198 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4199 		sbuf_cat(s, "EMP");
4200 		break;
4201 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4202 		sbuf_cat(s, "BMC");
4203 		break;
4204 	case I40E_AQ_SW_ELEM_TYPE_PV:
4205 		sbuf_cat(s, "PV");
4206 		break;
4207 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4208 		sbuf_cat(s, "VEB");
4209 		break;
4210 	case I40E_AQ_SW_ELEM_TYPE_PA:
4211 		sbuf_cat(s, "PA");
4212 		break;
4213 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4214 		sbuf_printf(s, "VSI %3d", element->element_info);
4215 		break;
4216 	default:
4217 		sbuf_cat(s, "?");
4218 		break;
4219 	}
4220 
4221 	sbuf_finish(s);
4222 	return sbuf_data(s);
4223 }
4224 
4225 static int
4226 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4227 {
4228 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4229 	struct i40e_hw *hw = &pf->hw;
4230 	device_t dev = pf->dev;
4231 	struct sbuf *buf;
4232 	struct sbuf *nmbuf;
4233 	enum i40e_status_code status;
4234 	int error = 0;
4235 	u16 next = 0;
4236 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4237 
4238 	struct i40e_aqc_get_switch_config_resp *sw_config;
4239 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4240 
4241 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4242 	if (!buf) {
4243 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4244 		return (ENOMEM);
4245 	}
4246 
4247 	status = i40e_aq_get_switch_config(hw, sw_config,
4248 	    sizeof(aq_buf), &next, NULL);
4249 	if (status) {
4250 		device_printf(dev,
4251 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4252 		    __func__, i40e_stat_str(hw, status),
4253 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4254 		sbuf_delete(buf);
		return (EIO);
4256 	}
4257 	if (next)
4258 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4259 		    __func__, next);
4260 
4261 	nmbuf = sbuf_new_auto();
4262 	if (!nmbuf) {
4263 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4264 		sbuf_delete(buf);
4265 		return (ENOMEM);
4266 	}
4267 
4268 	sbuf_cat(buf, "\n");
4269 	/* Assuming <= 255 elements in switch */
4270 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4271 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Revision is excluded; all elements are revision 1 for now. */
4275 	sbuf_printf(buf,
4276 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4277 	    "                |          |          | (uplink)\n");
4278 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4279 		// "%4d (%8s) | %8s   %8s   %#8x",
4280 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4281 		sbuf_cat(buf, " ");
4282 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4283 		    &sw_config->element[i]));
4284 		sbuf_cat(buf, " | ");
4285 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4286 		sbuf_cat(buf, "   ");
4287 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4288 		sbuf_cat(buf, "   ");
4289 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4290 		if (i < sw_config->header.num_reported - 1)
4291 			sbuf_cat(buf, "\n");
4292 	}
4293 	sbuf_delete(nmbuf);
4294 
4295 	error = sbuf_finish(buf);
4296 	if (error)
4297 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4298 
4299 	sbuf_delete(buf);
4300 
4301 	return (error);
4302 }
4303 
4304 static int
4305 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4306 {
4307 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4308 	struct i40e_hw *hw = &pf->hw;
4309 	device_t dev = pf->dev;
4310 	struct sbuf *buf;
4311 	int error = 0;
4312 	enum i40e_status_code status;
4313 	u32 reg;
4314 
4315 	struct i40e_aqc_get_set_rss_key_data key_data;
4316 
4317 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4318 	if (!buf) {
4319 		device_printf(dev, "Could not allocate sbuf for output.\n");
4320 		return (ENOMEM);
4321 	}
4322 
4323 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4324 
4325 	sbuf_cat(buf, "\n");
4326 	if (hw->mac.type == I40E_MAC_X722) {
4327 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4328 		if (status)
4329 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4330 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4331 	} else {
4332 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4333 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4334 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4335 		}
4336 	}
4337 
4338 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4339 
4340 	error = sbuf_finish(buf);
4341 	if (error)
4342 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4343 	sbuf_delete(buf);
4344 
4345 	return (error);
4346 }
4347 
4348 static void
4349 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4350 {
4351 	int i, j, k, width;
4352 	char c;
4353 
4354 	if (length < 1 || buf == NULL) return;
4355 
4356 	int byte_stride = 16;
4357 	int lines = length / byte_stride;
4358 	int rem = length % byte_stride;
4359 	if (rem > 0)
4360 		lines++;
4361 
4362 	for (i = 0; i < lines; i++) {
4363 		width = (rem > 0 && i == lines - 1)
4364 		    ? rem : byte_stride;
4365 
4366 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4367 
4368 		for (j = 0; j < width; j++)
4369 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4370 
4371 		if (width < byte_stride) {
4372 			for (k = 0; k < (byte_stride - width); k++)
4373 				sbuf_printf(sb, "   ");
4374 		}
4375 
4376 		if (!text) {
4377 			sbuf_printf(sb, "\n");
4378 			continue;
4379 		}
4380 
4381 		for (j = 0; j < width; j++) {
4382 			c = (char)buf[i * byte_stride + j];
4383 			if (c < 32 || c > 126)
4384 				sbuf_printf(sb, ".");
4385 			else
4386 				sbuf_printf(sb, "%c", c);
4387 
4388 			if (j == width - 1)
4389 				sbuf_printf(sb, "\n");
4390 		}
4391 	}
4392 }
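
/*
 * Illustrative output for a 3-byte buffer { 0x61, 0x62, 0x63 } with
 * label_offset 0 and text == true (hex columns space-padded out to
 * the 16-byte stride, then the printable-ASCII view, with '.' for
 * non-printable bytes; padding shortened here):
 *
 *	   0 | 61 62 63 ...                                  abc
 */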
4393 
4394 static int
4395 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4396 {
4397 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4398 	struct i40e_hw *hw = &pf->hw;
4399 	device_t dev = pf->dev;
4400 	struct sbuf *buf;
4401 	int error = 0;
4402 	enum i40e_status_code status;
4403 	u8 hlut[512];
4404 	u32 reg;
4405 
4406 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4407 	if (!buf) {
4408 		device_printf(dev, "Could not allocate sbuf for output.\n");
4409 		return (ENOMEM);
4410 	}
4411 
4412 	bzero(hlut, sizeof(hlut));
4413 	sbuf_cat(buf, "\n");
4414 	if (hw->mac.type == I40E_MAC_X722) {
4415 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4416 		if (status)
4417 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4418 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4419 	} else {
4420 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4421 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4422 			bcopy(&reg, &hlut[i << 2], 4);
4423 		}
4424 	}
4425 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4426 
4427 	error = sbuf_finish(buf);
4428 	if (error)
4429 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4430 	sbuf_delete(buf);
4431 
4432 	return (error);
4433 }
4434 
4435 static int
4436 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4437 {
4438 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4439 	struct i40e_hw *hw = &pf->hw;
4440 	u64 hena;
4441 
4442 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4443 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4444 
4445 	return sysctl_handle_long(oidp, NULL, hena, req);
4446 }
4447 
4448 /*
4449  * Sysctl to disable firmware's link management
4450  *
4451  * 1 - Disable link management on this port
4452  * 0 - Re-enable link management
4453  *
4454  * On normal NVMs, firmware manages link by default.
4455  */
4456 static int
4457 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4458 {
4459 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4460 	struct i40e_hw *hw = &pf->hw;
4461 	device_t dev = pf->dev;
4462 	int requested_mode = -1;
4463 	enum i40e_status_code status = 0;
4464 	int error = 0;
4465 
4466 	/* Read in new mode */
4467 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4468 	if ((error) || (req->newptr == NULL))
4469 		return (error);
4470 	/* Check for sane value */
4471 	if (requested_mode < 0 || requested_mode > 1) {
4472 		device_printf(dev, "Valid modes are 0 or 1\n");
4473 		return (EINVAL);
4474 	}
4475 
4476 	/* Set new mode */
4477 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4478 	if (status) {
4479 		device_printf(dev,
4480 		    "%s: Error setting new phy debug mode %s,"
4481 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4482 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4483 		return (EIO);
4484 	}
4485 
4486 	return (0);
4487 }
4488 
4489 /*
4490  * Read some diagnostic data from an SFP module
4491  * Bytes 96-99, 102-105 from device address 0xA2
4492  */
4493 static int
4494 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4495 {
4496 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4497 	device_t dev = pf->dev;
4498 	struct sbuf *sbuf;
4499 	int error = 0;
4500 	u8 output;
4501 
4502 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4503 	if (error) {
4504 		device_printf(dev, "Error reading from i2c\n");
4505 		return (error);
4506 	}
4507 	if (output != 0x3) {
4508 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4509 		return (EIO);
4510 	}
4511 
4512 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4513 	if (!(output & 0x60)) {
4514 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4515 		return (EIO);
4516 	}
4517 
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sbuf == NULL) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
4519 
4520 	for (u8 offset = 96; offset < 100; offset++) {
4521 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4522 		sbuf_printf(sbuf, "%02X ", output);
4523 	}
4524 	for (u8 offset = 102; offset < 106; offset++) {
4525 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4526 		sbuf_printf(sbuf, "%02X ", output);
4527 	}
4528 
4529 	sbuf_finish(sbuf);
4530 	sbuf_delete(sbuf);
4531 
4532 	return (0);
4533 }
4534 
4535 /*
4536  * Sysctl to read a byte from I2C bus.
4537  *
4538  * Input: 32-bit value:
4539  * 	bits 0-7:   device address (0xA0 or 0xA2)
4540  * 	bits 8-15:  offset (0-255)
4541  *	bits 16-31: unused
4542  * Output: 8-bit value read
4543  */
4544 static int
4545 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4546 {
4547 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4548 	device_t dev = pf->dev;
4549 	int input = -1, error = 0;
4550 	u8 dev_addr, offset, output;
4551 
4552 	/* Read in I2C read parameters */
4553 	error = sysctl_handle_int(oidp, &input, 0, req);
4554 	if ((error) || (req->newptr == NULL))
4555 		return (error);
4556 	/* Validate device address */
4557 	dev_addr = input & 0xFF;
4558 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4559 		return (EINVAL);
4560 	}
4561 	offset = (input >> 8) & 0xFF;
4562 
4563 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4564 	if (error)
4565 		return (error);
4566 
4567 	device_printf(dev, "%02X\n", output);
4568 	return (0);
4569 }
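
/*
 * Usage sketch (illustrative node path): read offset 96 (0x60) from
 * the SFP diagnostic device at 0xA2 by packing the input as
 * (offset << 8) | dev_addr == 0x60A2:
 *
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60a2
 *
 * The byte read is reported on the console via device_printf().
 */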
4570 
4571 /*
4572  * Sysctl to write a byte to the I2C bus.
4573  *
4574  * Input: 32-bit value:
4575  * 	bits 0-7:   device address (0xA0 or 0xA2)
4576  * 	bits 8-15:  offset (0-255)
4577  *	bits 16-23: value to write
4578  *	bits 24-31: unused
4579  * Output: 8-bit value written
4580  */
4581 static int
4582 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4583 {
4584 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4585 	device_t dev = pf->dev;
4586 	int input = -1, error = 0;
4587 	u8 dev_addr, offset, value;
4588 
4589 	/* Read in I2C write parameters */
4590 	error = sysctl_handle_int(oidp, &input, 0, req);
4591 	if ((error) || (req->newptr == NULL))
4592 		return (error);
4593 	/* Validate device address */
4594 	dev_addr = input & 0xFF;
4595 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4596 		return (EINVAL);
4597 	}
4598 	offset = (input >> 8) & 0xFF;
4599 	value = (input >> 16) & 0xFF;
4600 
4601 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4602 	if (error)
4603 		return (error);
4604 
4605 	device_printf(dev, "%02X written\n", value);
4606 	return (0);
4607 }
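
/*
 * Usage sketch (illustrative node path): write 0x55 to offset 0x60 of
 * device 0xA2 by packing (value << 16) | (offset << 8) | dev_addr,
 * which gives 0x5560A2:
 *
 *	# sysctl dev.ixl.0.debug.write_i2c_byte=0x5560a2
 */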
4608 
4609 static int
4610 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4611     u8 bit_pos, int *is_set)
4612 {
4613 	device_t dev = pf->dev;
4614 	struct i40e_hw *hw = &pf->hw;
4615 	enum i40e_status_code status;
4616 
4617 	status = i40e_aq_get_phy_capabilities(hw,
4618 	    FALSE, FALSE, abilities, NULL);
4619 	if (status) {
4620 		device_printf(dev,
4621 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4622 		    __func__, i40e_stat_str(hw, status),
4623 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4624 		return (EIO);
4625 	}
4626 
4627 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4628 	return (0);
4629 }
4630 
4631 static int
4632 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4633     u8 bit_pos, int set)
4634 {
4635 	device_t dev = pf->dev;
4636 	struct i40e_hw *hw = &pf->hw;
4637 	struct i40e_aq_set_phy_config config;
4638 	enum i40e_status_code status;
4639 
4640 	/* Set new PHY config */
4641 	memset(&config, 0, sizeof(config));
4642 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4643 	if (set)
4644 		config.fec_config |= bit_pos;
4645 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4646 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4647 		config.phy_type = abilities->phy_type;
4648 		config.phy_type_ext = abilities->phy_type_ext;
4649 		config.link_speed = abilities->link_speed;
4650 		config.eee_capability = abilities->eee_capability;
4651 		config.eeer = abilities->eeer_val;
4652 		config.low_power_ctrl = abilities->d3_lpan;
4653 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4654 
4655 		if (status) {
4656 			device_printf(dev,
4657 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4658 			    __func__, i40e_stat_str(hw, status),
4659 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4660 			return (EIO);
4661 		}
4662 	}
4663 
4664 	return (0);
4665 }
4666 
4667 static int
4668 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4669 {
4670 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4671 	int mode, error = 0;
4672 
4673 	struct i40e_aq_get_phy_abilities_resp abilities;
4674 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4675 	if (error)
4676 		return (error);
4677 	/* Read in new mode */
4678 	error = sysctl_handle_int(oidp, &mode, 0, req);
4679 	if ((error) || (req->newptr == NULL))
4680 		return (error);
4681 
4682 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4683 }
4684 
4685 static int
4686 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4687 {
4688 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4689 	int mode, error = 0;
4690 
4691 	struct i40e_aq_get_phy_abilities_resp abilities;
4692 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4693 	if (error)
4694 		return (error);
4695 	/* Read in new mode */
4696 	error = sysctl_handle_int(oidp, &mode, 0, req);
4697 	if ((error) || (req->newptr == NULL))
4698 		return (error);
4699 
4700 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4701 }
4702 
4703 static int
4704 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4705 {
4706 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4707 	int mode, error = 0;
4708 
4709 	struct i40e_aq_get_phy_abilities_resp abilities;
4710 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4711 	if (error)
4712 		return (error);
4713 	/* Read in new mode */
4714 	error = sysctl_handle_int(oidp, &mode, 0, req);
4715 	if ((error) || (req->newptr == NULL))
4716 		return (error);
4717 
4718 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4719 }
4720 
4721 static int
4722 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4723 {
4724 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4725 	int mode, error = 0;
4726 
4727 	struct i40e_aq_get_phy_abilities_resp abilities;
4728 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4729 	if (error)
4730 		return (error);
4731 	/* Read in new mode */
4732 	error = sysctl_handle_int(oidp, &mode, 0, req);
4733 	if ((error) || (req->newptr == NULL))
4734 		return (error);
4735 
4736 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4737 }
4738 
4739 static int
4740 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4741 {
4742 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4743 	int mode, error = 0;
4744 
4745 	struct i40e_aq_get_phy_abilities_resp abilities;
4746 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4747 	if (error)
4748 		return (error);
4749 	/* Read in new mode */
4750 	error = sysctl_handle_int(oidp, &mode, 0, req);
4751 	if ((error) || (req->newptr == NULL))
4752 		return (error);
4753 
4754 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4755 }
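
/*
 * All five FEC sysctl handlers above share one read-modify-write
 * pattern: ixl_get_fec_config() reports whether the given bit is set
 * in fec_cfg_curr_mod_ext_info, and ixl_set_fec_config() sets or
 * clears exactly that bit (for example I40E_AQ_SET_FEC_AUTO in the
 * auto-enable handler) while leaving the other FEC bits intact,
 * issuing the set-PHY-config AQ command only when the value actually
 * changes.
 */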
4756 
4757 static int
4758 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4759 {
4760 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4761 	struct i40e_hw *hw = &pf->hw;
4762 	device_t dev = pf->dev;
4763 	struct sbuf *buf;
4764 	int error = 0;
4765 	enum i40e_status_code status;
4766 
4767 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4768 	if (!buf) {
4769 		device_printf(dev, "Could not allocate sbuf for output.\n");
4770 		return (ENOMEM);
4771 	}
4772 
4773 	u8 *final_buff;
4774 	/* This amount is only necessary if reading the entire cluster into memory */
4775 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4776 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4777 	if (final_buff == NULL) {
4778 		device_printf(dev, "Could not allocate memory for output.\n");
4779 		goto out;
4780 	}
4781 	int final_buff_len = 0;
4782 
4783 	u8 cluster_id = 1;
4784 	bool more = true;
4785 
4786 	u8 dump_buf[4096];
4787 	u16 curr_buff_size = 4096;
4788 	u8 curr_next_table = 0;
4789 	u32 curr_next_index = 0;
4790 
4791 	u16 ret_buff_size;
4792 	u8 ret_next_table;
4793 	u32 ret_next_index;
4794 
4795 	sbuf_cat(buf, "\n");
4796 
4797 	while (more) {
4798 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4799 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4800 		if (status) {
4801 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4802 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4803 			goto free_out;
4804 		}
4805 
4806 		/* copy info out of temp buffer */
4807 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4808 		final_buff_len += ret_buff_size;
4809 
4810 		if (ret_next_table != curr_next_table) {
4811 			/* We're done with the current table; we can dump out read data. */
4812 			sbuf_printf(buf, "%d:", curr_next_table);
4813 			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");
4819 
4820 			/* The entire cluster has been read; we're finished */
4821 			if (ret_next_table == 0xFF)
4822 				break;
4823 
4824 			/* Otherwise clear the output buffer and continue reading */
4825 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4826 			final_buff_len = 0;
4827 		}
4828 
4829 		if (ret_next_index == 0xFFFFFFFF)
4830 			ret_next_index = 0;
4831 
4832 		bzero(dump_buf, sizeof(dump_buf));
4833 		curr_next_table = ret_next_table;
4834 		curr_next_index = ret_next_index;
4835 	}
4836 
4837 free_out:
4838 	free(final_buff, M_DEVBUF);
4839 out:
4840 	error = sbuf_finish(buf);
4841 	if (error)
4842 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4843 	sbuf_delete(buf);
4844 
4845 	return (error);
4846 }
4847 
4848 static int
4849 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4850 {
4851 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4852 	struct i40e_hw *hw = &pf->hw;
4853 	device_t dev = pf->dev;
4854 	int error = 0;
4855 	int state, new_state;
4856 	enum i40e_status_code status;
4857 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4858 
4859 	/* Read in new mode */
4860 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4861 	if ((error) || (req->newptr == NULL))
4862 		return (error);
4863 
4864 	/* Already in requested state */
4865 	if (new_state == state)
4866 		return (error);
4867 
4868 	if (new_state == 0) {
4869 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4870 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4871 			return (EINVAL);
4872 		}
4873 
4874 		if (pf->hw.aq.api_maj_ver < 1 ||
4875 		    (pf->hw.aq.api_maj_ver == 1 &&
4876 		    pf->hw.aq.api_min_ver < 7)) {
4877 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4878 			return (EINVAL);
4879 		}
4880 
4881 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4882 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4883 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4884 	} else {
4885 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4886 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4887 			device_printf(dev, "FW LLDP agent is already running\n");
4888 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4889 	}
4890 
4891 	return (0);
4892 }
4893 
4894 /*
4895  * Get FW LLDP Agent status
4896  */
4897 int
4898 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4899 {
4900 	enum i40e_status_code ret = I40E_SUCCESS;
4901 	struct i40e_lldp_variables lldp_cfg;
4902 	struct i40e_hw *hw = &pf->hw;
4903 	u8 adminstatus = 0;
4904 
4905 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4906 	if (ret)
4907 		return ret;
4908 
4909 	/* Get the LLDP AdminStatus for the current port */
4910 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4911 	adminstatus &= 0xf;
4912 
4913 	/* Check if LLDP agent is disabled */
4914 	if (!adminstatus) {
4915 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4916 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4917 	} else
4918 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4919 
4920 	return (0);
4921 }
4922 
4923 int
4924 ixl_attach_get_link_status(struct ixl_pf *pf)
4925 {
4926 	struct i40e_hw *hw = &pf->hw;
4927 	device_t dev = pf->dev;
4928 	int error = 0;
4929 
4930 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4931 	    (hw->aq.fw_maj_ver < 4)) {
4932 		i40e_msec_delay(75);
4933 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4934 		if (error) {
4935 			device_printf(dev, "link restart failed, aq_err=%d\n",
4936 			    pf->hw.aq.asq_last_status);
4937 			return error;
4938 		}
4939 	}
4940 
4941 	/* Determine link state */
4942 	hw->phy.get_link_info = TRUE;
4943 	i40e_get_link_status(hw, &pf->link_up);
4944 	return (0);
4945 }
4946 
4947 static int
4948 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4949 {
4950 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4951 	int requested = 0, error = 0;
4952 
4953 	/* Read in new mode */
4954 	error = sysctl_handle_int(oidp, &requested, 0, req);
4955 	if ((error) || (req->newptr == NULL))
4956 		return (error);
4957 
4958 	/* Initiate the PF reset later in the admin task */
4959 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4960 
4961 	return (error);
4962 }
4963 
4964 static int
4965 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4966 {
4967 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4968 	struct i40e_hw *hw = &pf->hw;
4969 	int requested = 0, error = 0;
4970 
4971 	/* Read in new mode */
4972 	error = sysctl_handle_int(oidp, &requested, 0, req);
4973 	if ((error) || (req->newptr == NULL))
4974 		return (error);
4975 
4976 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4977 
4978 	return (error);
4979 }
4980 
4981 static int
4982 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4983 {
4984 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4985 	struct i40e_hw *hw = &pf->hw;
4986 	int requested = 0, error = 0;
4987 
4988 	/* Read in new mode */
4989 	error = sysctl_handle_int(oidp, &requested, 0, req);
4990 	if ((error) || (req->newptr == NULL))
4991 		return (error);
4992 
4993 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4994 
4995 	return (error);
4996 }
4997 
4998 static int
4999 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5000 {
5001 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5002 	struct i40e_hw *hw = &pf->hw;
5003 	int requested = 0, error = 0;
5004 
5005 	/* Read in new mode */
5006 	error = sysctl_handle_int(oidp, &requested, 0, req);
5007 	if ((error) || (req->newptr == NULL))
5008 		return (error);
5009 
5010 	/* TODO: Find out how to bypass this */
5011 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5012 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5013 		error = EINVAL;
5014 	} else
5015 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5016 
5017 	return (error);
5018 }
5019 
5020 /*
5021  * Print out mapping of TX queue indexes and Rx queue indexes
5022  * to MSI-X vectors.
5023  */
5024 static int
5025 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5026 {
5027 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5028 	struct ixl_vsi *vsi = &pf->vsi;
5029 	device_t dev = pf->dev;
5030 	struct sbuf *buf;
5031 	int error = 0;
5032 
5033 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5034 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5035 
5036 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5037 	if (!buf) {
5038 		device_printf(dev, "Could not allocate sbuf for output.\n");
5039 		return (ENOMEM);
5040 	}
5041 
5042 	sbuf_cat(buf, "\n");
5043 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5044 		rx_que = &vsi->rx_queues[i];
5045 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5046 	}
5047 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5048 		tx_que = &vsi->tx_queues[i];
5049 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5050 	}
5051 
5052 	error = sbuf_finish(buf);
5053 	if (error)
5054 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5055 	sbuf_delete(buf);
5056 
5057 	return (error);
5058 }
5059