xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision 20b91f0aa52c8415e0bc1a06b8b4b5e2cac47bd2)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 

#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

/* Sysctls */
static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}
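
/*
 * Example of the resulting string, with illustrative values only:
 *
 *   "fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.262.0"
 *
 * i.e. firmware major.minor.build, AQ API major.minor, NVM image high.low,
 * the EEtrack ID, and the OEM version fields unpacked above.
 */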

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
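
/*
 * Note: the hardware interprets ITR register values in 2-usec units (see
 * the "0x3E is 62*2 = 124 usecs" note in ixl_configure_intr0_msix() below),
 * so a smaller ITR value means a shorter minimum gap between interrupts
 * and therefore a higher maximum interrupt rate.
 */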

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case 0: {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == 1 &&
		    hw->aq.api_min_ver >= 7) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case 3:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}
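
/*
 * Example of the capability line printed above (illustrative values only):
 *
 *   ixl0: PF-ID[0]: VFs 64, MSI-X 129, VF MSI-X 5, QPs 768, I2C
 */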

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

#if 0
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		     error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

	/* Re-enable admin queue interrupt */
	if (pf->msix > 1) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

err_out:
	return (error);
#endif
	ixl_rebuild_hw_structs_after_reset(pf);

	/* The PF reset should have cleared any critical errors */
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= IXL_ICR0_CRIT_ERR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

err_out:
	return (error);
}

/*
 * TODO: Make sure this properly handles admin queue / single rx queue intr
 */
int
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;

	// pf->admin_irq++
	++que->irqs;

// TODO: Check against proper field
#if 0
	/* Clear PBA at start of ISR if using legacy interrupts */
	if (pf->msix == 0)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
#endif

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	// TODO: Is intr0 enabled somewhere else?
	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}


/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);
	// ixl_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}


/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 *
	 * TODO: I think at least ECC error requires a GLOBR, not PFR
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	// TODO: Linux driver never re-enables this interrupt once it has been detected
	// Then what is supposed to happen? A PF reset? Should it never happen?
	// TODO: Parse out this error into something human readable
	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));

	return (1);
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	SLIST_FOREACH(f, &vsi->ftl, next)
		if ((f->flags & IXL_FILTER_USED) &&
		    (f->flags & IXL_FILTER_MC) &&
		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}
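
/*
 * Example of the log line produced above (illustrative values only):
 *
 *   ixl0: Link is up, 40 Gbps Full Duplex, Requested FEC: None,
 *   Negotiated FEC: None, Autoneg: True, Flow Control: Full
 */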

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	// TODO: See if max is really necessary
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/*
		 * Set linked list head to point to corresponding RX queue,
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's
		 * RX queue.
		 */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
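
/*
 * Illustration of the per-vector cause chain programmed above, assuming
 * queue pair 0 on MSI-X vector 1:
 *
 *   LNKLSTN(0) -> RX queue 0 (RQCTL(0)) -> TX queue 0 (TQCTL(0)) -> EOL
 *
 * so a single vector services both directions of its queue pair, with
 * IXL_QUEUE_EOL terminating the list.
 */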

/*
 * Configure for single interrupt vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

// TODO: Fix
#if 0
	/* Configure ITR */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;
	/* XXX: Assuming only 1 queue in single interrupt mode */
#endif
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pf->pci_mem), pf->pci_mem);
}

void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
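	/*
	 * For example, the default 1500-byte MTU yields a max frame size of
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */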

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		/* TODO: Maybe just retry this in a task... */
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	/* Fall back to the minimum supported link speed */
	return IF_Mbps(100);
}
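
/*
 * For example, a PHY reporting both I40E_LINK_SPEED_25GB and
 * I40E_LINK_SPEED_10GB in its speed bitmap resolves to IF_Gbps(25),
 * since the checks above run from fastest to slowest.
 */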

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  			 like the number of descriptors, buffer size,
 *			 plus we init the rings through this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
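	/*
	 * For example, with 8 allocated queues, fls(8) - 1 = 3, so the TC0
	 * map advertises 2^3 = 8 queues starting at first_qidx.
	 */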

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context  */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context  */
		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
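			/*
			 * This is effectively a weighted harmonic mean: with
			 * target t and current value o, the new ITR is
			 * 10*t*o / (9*t + o), i.e. the current value carries
			 * nine times the weight of the target, so the ITR
			 * drifts toward the target over several updates
			 * instead of jumping on a single sample.
			 */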
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}


/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing, as on the RX side */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
}
1492 
1493 #ifdef IXL_DEBUG
1494 /**
1495  * ixl_sysctl_qtx_tail_handler
1496  * Retrieves I40E_QTX_TAIL value from hardware
1497  * for a sysctl.
1498  */
1499 int
1500 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1501 {
1502 	struct ixl_tx_queue *tx_que;
1503 	int error;
1504 	u32 val;
1505 
1506 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1507 	if (!tx_que) return 0;
1508 
1509 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1510 	error = sysctl_handle_int(oidp, &val, 0, req);
1511 	if (error || !req->newptr)
1512 		return error;
1513 	return (0);
1514 }
1515 
1516 /**
1517  * ixl_sysctl_qrx_tail_handler
1518  * Retrieves I40E_QRX_TAIL value from hardware
1519  * for a sysctl.
1520  */
1521 int
1522 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1523 {
1524 	struct ixl_rx_queue *rx_que;
1525 	int error;
1526 	u32 val;
1527 
1528 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1529 	if (!rx_que) return 0;
1530 
1531 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1532 	error = sysctl_handle_int(oidp, &val, 0, req);
1533 	if (error || !req->newptr)
1534 		return error;
1535 	return (0);
1536 }
1537 #endif

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}
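
/*
 * Usage sketch, assuming this handler is registered under the driver's
 * sysctl tree as "tx_itr" (the exact OID name is set where the sysctl
 * is created):
 *
 *   # sysctl dev.ixl.0.tx_itr=122
 *
 * The new value takes effect immediately via ixl_configure_tx_itr().
 */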

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &pf->admin_irq,
			"Admin Queue IRQs received");

	ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");

	ixl_add_queues_sysctls(dev, vsi);

	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}
}
1688 
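/*
 * Program the RSS hash key, using the Admin Queue on X722 devices
 * and the PFQF_HKEY registers on other MACs.
 */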
1689 void
1690 ixl_set_rss_key(struct ixl_pf *pf)
1691 {
1692 	struct i40e_hw *hw = &pf->hw;
1693 	struct ixl_vsi *vsi = &pf->vsi;
1694 	device_t	dev = pf->dev;
1695 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1696 	enum i40e_status_code status;
1697 
1698 #ifdef RSS
1699 	/* Fetch the configured RSS key */
1700 	rss_getkey((uint8_t *)&rss_seed);
1701 #else
1702 	ixl_get_default_rss_key(rss_seed);
1703 #endif
1704 	/* Fill out hash function seed */
1705 	if (hw->mac.type == I40E_MAC_X722) {
1706 		struct i40e_aqc_get_set_rss_key_data key_data;
1707 		bcopy(rss_seed, &key_data, sizeof(key_data));
1708 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1709 		if (status)
1710 			device_printf(dev,
1711 			    "i40e_aq_set_rss_key status %s, error %s\n",
1712 			    i40e_stat_str(hw, status),
1713 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1714 	} else {
1715 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1716 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1717 	}
1718 }
1719 
1720 /*
1721  * Configure enabled PCTYPES for RSS.
1722  */
1723 void
1724 ixl_set_rss_pctypes(struct ixl_pf *pf)
1725 {
1726 	struct i40e_hw *hw = &pf->hw;
1727 	u64		set_hena = 0, hena;
1728 
1729 #ifdef RSS
1730 	u32		rss_hash_config;
1731 
1732 	rss_hash_config = rss_gethashconfig();
1733 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1734 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1735 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1736 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1737 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1738 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1739 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1740 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1741 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1742 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1743 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1744 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1745 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1746 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1747 #else
1748 	if (hw->mac.type == I40E_MAC_X722)
1749 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1750 	else
1751 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1752 #endif
1753 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1754 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1755 	hena |= set_hena;
1756 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1757 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1758 
1759 }
1760 
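/*
 * Program the RSS lookup table, mapping its entries to the RX queues
 * in round-robin order; uses the Admin Queue on X722 devices and the
 * PFQF_HLUT registers on other MACs.
 */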
1761 void
1762 ixl_set_rss_hlut(struct ixl_pf *pf)
1763 {
1764 	struct i40e_hw	*hw = &pf->hw;
1765 	struct ixl_vsi *vsi = &pf->vsi;
1766 	device_t	dev = iflib_get_dev(vsi->ctx);
1767 	int		i, que_id;
1768 	int		lut_entry_width;
1769 	u32		lut = 0;
1770 	enum i40e_status_code status;
1771 
1772 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1773 
1774 	/* Populate the LUT, assigning RX queues in round-robin fashion */
1775 	u8 hlut_buf[512];
1776 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1777 #ifdef RSS
1778 		/*
1779 		 * Fetch the RSS bucket id for the given indirection entry.
1780 		 * Cap it at the number of configured buckets (which is
1781 		 * num_queues.)
1782 		 */
1783 		que_id = rss_get_indirection_to_bucket(i);
1784 		que_id = que_id % vsi->num_rx_queues;
1785 #else
1786 		que_id = i % vsi->num_rx_queues;
1787 #endif
1788 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1789 		hlut_buf[i] = lut;
1790 	}
1791 
1792 	if (hw->mac.type == I40E_MAC_X722) {
1793 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1794 		if (status)
1795 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1796 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1797 	} else {
1798 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1799 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1800 		ixl_flush(hw);
1801 	}
1802 }
1803 
1804 /*
1805 ** Setup the PF's RSS parameters.
1806 */
1807 void
1808 ixl_config_rss(struct ixl_pf *pf)
1809 {
1810 	ixl_set_rss_key(pf);
1811 	ixl_set_rss_pctypes(pf);
1812 	ixl_set_rss_hlut(pf);
1813 }
1814 
1815 /*
1816 ** This routine updates VLAN filters. It is called by init
1817 ** after a soft reset; it scans the filter table and then
1818 ** updates the hardware.
1819 */
1820 void
1821 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1822 {
1823 	struct ixl_mac_filter	*f;
1824 	int			cnt = 0, flags;
1825 
1826 	if (vsi->num_vlans == 0)
1827 		return;
1828 	/*
1829 	** Scan the filter list for vlan entries,
1830 	** mark them for addition and then call
1831 	** for the AQ update.
1832 	*/
1833 	SLIST_FOREACH(f, &vsi->ftl, next) {
1834 		if (f->flags & IXL_FILTER_VLAN) {
1835 			f->flags |=
1836 			    (IXL_FILTER_ADD |
1837 			    IXL_FILTER_USED);
1838 			cnt++;
1839 		}
1840 	}
1841 	if (cnt == 0) {
1842 		printf("setup vlan: no filters found!\n");
1843 		return;
1844 	}
1845 	flags = IXL_FILTER_VLAN;
1846 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1847 	ixl_add_hw_filters(vsi, flags, cnt);
1848 }
1849 
1850 /*
1851  * In some firmware versions there is a default MAC/VLAN filter
1852  * configured which interferes with the filters managed by the driver.
1853  * Make sure it's removed.
1854  */
1855 void
1856 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1857 {
1858 	struct i40e_aqc_remove_macvlan_element_data e;
1859 
1860 	bzero(&e, sizeof(e));
1861 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1862 	e.vlan_tag = 0;
1863 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1864 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1865 
1866 	bzero(&e, sizeof(e));
1867 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1868 	e.vlan_tag = 0;
1869 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1870 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1871 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1872 }
1873 
1874 /*
1875 ** Initialize filter list and add filters that the hardware
1876 ** needs to know about.
1877 **
1878 ** Requires VSI's filter list & seid to be set before calling.
1879 */
1880 void
1881 ixl_init_filters(struct ixl_vsi *vsi)
1882 {
1883 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1884 
1885 	/* Initialize mac filter list for VSI */
1886 	SLIST_INIT(&vsi->ftl);
1887 
1888 	/* Receive broadcast Ethernet frames */
1889 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1890 
1891 	ixl_del_default_hw_filters(vsi);
1892 
1893 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1894 	/*
1895 	 * Prevent Tx flow control frames from being sent out by
1896 	 * non-firmware transmitters.
1897 	 * This affects every VSI in the PF.
1898 	 */
1899 	if (pf->enable_tx_fc_filter)
1900 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1901 }
1902 
1903 /*
1904 ** This routine adds multicast filters
1905 */
1906 void
1907 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1908 {
1909 	struct ixl_mac_filter *f;
1910 
1911 	/* Does one already exist? */
1912 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1913 	if (f != NULL)
1914 		return;
1915 
1916 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1917 	if (f != NULL)
1918 		f->flags |= IXL_FILTER_MC;
1919 	else
1920 		printf("WARNING: no filter available!!\n");
1921 }
1922 
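/*
 * Re-program every filter still marked as used into the hardware,
 * e.g. to restore the filter table after a reset.
 */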
1923 void
1924 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1925 {
1926 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1927 }
1928 
1929 /*
1930  * This routine adds a MAC/VLAN filter to the software filter
1931  * list and programs it into the hardware, unless a matching
1932  * filter already exists in the SW filter list.
1933  */
1934 void
1935 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1936 {
1937 	struct ixl_mac_filter	*f, *tmp;
1938 	struct ixl_pf		*pf;
1939 	device_t		dev;
1940 
1941 	DEBUGOUT("ixl_add_filter: begin");
1942 
1943 	pf = vsi->back;
1944 	dev = pf->dev;
1945 
1946 	/* Does one already exist? */
1947 	f = ixl_find_filter(vsi, macaddr, vlan);
1948 	if (f != NULL)
1949 		return;
1950 	/*
1951 	** If this is the first VLAN being registered, we need
1952 	** to remove the ANY filter that indicates we are not
1953 	** in a VLAN, and replace it with a VLAN-0 filter.
1954 	*/
1955 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1956 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1957 		if (tmp != NULL) {
1958 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 			ixl_add_filter(vsi, macaddr, 0);
1960 		}
1961 	}
1962 
1963 	f = ixl_new_filter(vsi, macaddr, vlan);
1964 	if (f == NULL) {
1965 		device_printf(dev, "WARNING: no filter available!!\n");
1966 		return;
1967 	}
1968 	if (f->vlan != IXL_VLAN_ANY)
1969 		f->flags |= IXL_FILTER_VLAN;
1970 	else
1971 		vsi->num_macs++;
1972 
1973 	f->flags |= IXL_FILTER_USED;
1974 	ixl_add_hw_filters(vsi, f->flags, 1);
1975 }
1976 
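/*
 * Remove the matching MAC/VLAN filter from the hardware and the software
 * filter list; when the last VLAN is removed, restore the VLAN-agnostic
 * (ANY) filter for the address.
 */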
1977 void
1978 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1979 {
1980 	struct ixl_mac_filter *f;
1981 	int flags;
1982 
1983 	f = ixl_find_filter(vsi, macaddr, vlan);
1984 	if (f == NULL)
1985 		return;
1986 	/* Save the flags; ixl_del_hw_filters() frees the filter entry */
1987 	flags = (f->flags |= IXL_FILTER_DEL);
1988 	ixl_del_hw_filters(vsi, 1);
1989 	if (vlan == IXL_VLAN_ANY && (flags & IXL_FILTER_VLAN) != 0)
1990 		vsi->num_macs--;
1991 	/* Check if this is the last vlan removal */
1992 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1993 		/* Switch back to a non-vlan filter */
1994 		ixl_del_filter(vsi, macaddr, 0);
1995 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1996 	}
1997 	return;
1998 }
1999 
2000 /*
2001 ** Find the filter with both matching mac addr and vlan id
2002 */
2003 struct ixl_mac_filter *
2004 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2005 {
2006 	struct ixl_mac_filter	*f;
2007 
2008 	SLIST_FOREACH(f, &vsi->ftl, next) {
2009 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2010 		    && (f->vlan == vlan)) {
2011 			return (f);
2012 		}
2013 	}
2014 
2015 	return (NULL);
2016 }
2017 
2018 /*
2019 ** This routine takes additions to the vsi filter
2020 ** table and creates an Admin Queue call to create
2021 ** the filters in the hardware.
2022 */
2023 void
2024 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2025 {
2026 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2027 	struct ixl_mac_filter	*f;
2028 	struct ixl_pf		*pf;
2029 	struct i40e_hw		*hw;
2030 	device_t		dev;
2031 	enum i40e_status_code	status;
2032 	int			j = 0;
2033 
2034 	pf = vsi->back;
2035 	dev = vsi->dev;
2036 	hw = &pf->hw;
2037 
2038 	if (cnt < 1) {
2039 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt < 1\n");
2040 		return;
2041 	}
2042 
2043 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2044 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2045 	if (a == NULL) {
2046 		device_printf(dev, "add_hw_filters failed to get memory\n");
2047 		return;
2048 	}
2049 
2050 	/*
2051 	** Scan the filter list; each time we find a matching
2052 	** filter, add it to the admin queue array and clear
2053 	** its add bit.
2054 	*/
2055 	SLIST_FOREACH(f, &vsi->ftl, next) {
2056 		if ((f->flags & flags) == flags) {
2057 			b = &a[j]; // a pox on fvl long names :)
2058 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2059 			if (f->vlan == IXL_VLAN_ANY) {
2060 				b->vlan_tag = 0;
2061 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2062 			} else {
2063 				b->vlan_tag = f->vlan;
2064 				b->flags = 0;
2065 			}
2066 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2067 			f->flags &= ~IXL_FILTER_ADD;
2068 			j++;
2069 
2070 			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2071 			    MAC_FORMAT_ARGS(f->macaddr));
2072 		}
2073 		if (j == cnt)
2074 			break;
2075 	}
2076 	if (j > 0) {
2077 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2078 		if (status)
2079 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2080 			    "error %s\n", i40e_stat_str(hw, status),
2081 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2082 		else
2083 			vsi->hw_filters_add += j;
2084 	}
2085 	free(a, M_DEVBUF);
2086 	return;
2087 }
2088 
2089 /*
2090 ** This routine takes removals in the vsi filter
2091 ** table and creates an Admin Queue call to delete
2092 ** the filters in the hardware.
2093 */
2094 void
2095 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2096 {
2097 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2098 	struct ixl_pf		*pf;
2099 	struct i40e_hw		*hw;
2100 	device_t		dev;
2101 	struct ixl_mac_filter	*f, *f_temp;
2102 	enum i40e_status_code	status;
2103 	int			j = 0;
2104 
2105 	pf = vsi->back;
2106 	hw = &pf->hw;
2107 	dev = vsi->dev;
2108 
2109 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2110 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2111 	if (d == NULL) {
2112 		device_printf(dev, "%s: failed to get memory\n", __func__);
2113 		return;
2114 	}
2115 
2116 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2117 		if (f->flags & IXL_FILTER_DEL) {
2118 			e = &d[j]; // a pox on fvl long names :)
2119 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2120 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2121 			if (f->vlan == IXL_VLAN_ANY) {
2122 				e->vlan_tag = 0;
2123 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2124 			} else {
2125 				e->vlan_tag = f->vlan;
2126 			}
2127 
2128 			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2129 			    MAC_FORMAT_ARGS(f->macaddr));
2130 
2131 			/* delete entry from vsi list */
2132 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2133 			free(f, M_DEVBUF);
2134 			j++;
2135 		}
2136 		if (j == cnt)
2137 			break;
2138 	}
2139 	if (j > 0) {
2140 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2141 		if (status) {
2142 			int sc = 0;
2143 			for (int i = 0; i < j; i++)
2144 				sc += (!d[i].error_code);
2145 			vsi->hw_filters_del += sc;
2146 			device_printf(dev,
2147 			    "Failed to remove %d/%d filters, error %s\n",
2148 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2149 		} else
2150 			vsi->hw_filters_del += j;
2151 	}
2152 	free(d, M_DEVBUF);
2153 	return;
2154 }
2155 
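/*
 * Enable the given PF TX ring, then poll until the hardware reports
 * that the enable took effect; returns ETIMEDOUT if it does not.
 */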
2156 int
2157 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2158 {
2159 	struct i40e_hw	*hw = &pf->hw;
2160 	int		error = 0;
2161 	u32		reg;
2162 	u16		pf_qidx;
2163 
2164 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2165 
2166 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2167 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2168 	    pf_qidx, vsi_qidx);
2169 
2170 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2171 
2172 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2173 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2174 	    I40E_QTX_ENA_QENA_STAT_MASK;
2175 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2176 	/* Verify the enable took */
2177 	for (int j = 0; j < 10; j++) {
2178 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2179 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2180 			break;
2181 		i40e_usec_delay(10);
2182 	}
2183 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2184 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2185 		    pf_qidx);
2186 		error = ETIMEDOUT;
2187 	}
2188 
2189 	return (error);
2190 }
2191 
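/*
 * Enable the given PF RX ring, then poll until the hardware reports
 * that the enable took effect; returns ETIMEDOUT if it does not.
 */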
2192 int
2193 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2194 {
2195 	struct i40e_hw	*hw = &pf->hw;
2196 	int		error = 0;
2197 	u32		reg;
2198 	u16		pf_qidx;
2199 
2200 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2201 
2202 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2203 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2204 	    pf_qidx, vsi_qidx);
2205 
2206 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2207 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2208 	    I40E_QRX_ENA_QENA_STAT_MASK;
2209 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2210 	/* Verify the enable took */
2211 	for (int j = 0; j < 10; j++) {
2212 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2213 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2214 			break;
2215 		i40e_usec_delay(10);
2216 	}
2217 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2218 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2219 		    pf_qidx);
2220 		error = ETIMEDOUT;
2221 	}
2222 
2223 	return (error);
2224 }
2225 
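/*
 * Enable the TX and RX rings of a queue pair, stopping at the first failure.
 */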
2226 int
2227 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2228 {
2229 	int error = 0;
2230 
2231 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2232 	/* Called function already prints error message */
2233 	if (error)
2234 		return (error);
2235 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2236 	return (error);
2237 }
2238 
2239 /* For PF VSI only */
2240 int
2241 ixl_enable_rings(struct ixl_vsi *vsi)
2242 {
2243 	struct ixl_pf	*pf = vsi->back;
2244 	int		error = 0;
2245 
2246 	for (int i = 0; i < vsi->num_tx_queues; i++)
2247 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2248 
2249 	for (int i = 0; i < vsi->num_rx_queues; i++)
2250 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2251 
2252 	return (error);
2253 }
2254 
2255 /*
2256  * Returns an error if the TX ring could not be disabled within the timeout.
2257  */
2258 int
2259 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2260 {
2261 	struct i40e_hw	*hw = &pf->hw;
2262 	int		error = 0;
2263 	u32		reg;
2264 	u16		pf_qidx;
2265 
2266 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2267 
2268 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2269 	i40e_usec_delay(500);
2270 
2271 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2272 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2273 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2274 	/* Verify the disable took */
2275 	for (int j = 0; j < 10; j++) {
2276 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2277 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2278 			break;
2279 		i40e_msec_delay(10);
2280 	}
2281 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2282 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2283 		    pf_qidx);
2284 		error = ETIMEDOUT;
2285 	}
2286 
2287 	return (error);
2288 }
2289 
2290 /*
2291  * Returns an error if the RX ring could not be disabled within the timeout.
2292  */
2293 int
2294 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2295 {
2296 	struct i40e_hw	*hw = &pf->hw;
2297 	int		error = 0;
2298 	u32		reg;
2299 	u16		pf_qidx;
2300 
2301 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2302 
2303 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2304 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2305 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2306 	/* Verify the disable took */
2307 	for (int j = 0; j < 10; j++) {
2308 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2309 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2310 			break;
2311 		i40e_msec_delay(10);
2312 	}
2313 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2314 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2315 		    pf_qidx);
2316 		error = ETIMEDOUT;
2317 	}
2318 
2319 	return (error);
2320 }
2321 
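/*
 * Disable the TX and RX rings of a queue pair, stopping at the first failure.
 */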
2322 int
2323 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2324 {
2325 	int error = 0;
2326 
2327 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2328 	/* Called function already prints error message */
2329 	if (error)
2330 		return (error);
2331 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2332 	return (error);
2333 }
2334 
2335 int
2336 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2337 {
2338 	int error = 0;
2339 
2340 	for (int i = 0; i < vsi->num_tx_queues; i++)
2341 		error = ixl_disable_tx_ring(pf, qtag, i);
2342 
2343 	for (int i = 0; i < vsi->num_rx_queues; i++)
2344 		error = ixl_disable_rx_ring(pf, qtag, i);
2345 
2346 	return (error);
2347 }
2348 
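/*
 * Decode a TX Malicious Driver Detection event: extract the event type,
 * queue and function numbers from GL_MDET_TX, clear the latched PF/VF
 * detection registers and log what was found.
 */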
2349 static void
2350 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2351 {
2352 	struct i40e_hw *hw = &pf->hw;
2353 	device_t dev = pf->dev;
2354 	struct ixl_vf *vf;
2355 	bool mdd_detected = false;
2356 	bool pf_mdd_detected = false;
2357 	bool vf_mdd_detected = false;
2358 	u16 vf_num, queue;
2359 	u8 pf_num, event;
2360 	u8 pf_mdet_num, vp_mdet_num;
2361 	u32 reg;
2362 
2363 	/* find what triggered the MDD event */
2364 	reg = rd32(hw, I40E_GL_MDET_TX);
2365 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2366 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2367 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
2368 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2369 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
2370 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2371 		    I40E_GL_MDET_TX_EVENT_SHIFT;
2372 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2373 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
2374 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2375 		mdd_detected = true;
2376 	}
2377 
2378 	if (!mdd_detected)
2379 		return;
2380 
2381 	reg = rd32(hw, I40E_PF_MDET_TX);
2382 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2383 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2384 		pf_mdet_num = hw->pf_id;
2385 		pf_mdd_detected = true;
2386 	}
2387 
2388 	/* Check if MDD was caused by a VF */
2389 	for (int i = 0; i < pf->num_vfs; i++) {
2390 		vf = &(pf->vfs[i]);
2391 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2392 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2393 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2394 			vp_mdet_num = i;
2395 			vf->num_mdd_events++;
2396 			vf_mdd_detected = true;
2397 		}
2398 	}
2399 
2400 	/* Print out an error message */
2401 	if (vf_mdd_detected && pf_mdd_detected)
2402 		device_printf(dev,
2403 		    "Malicious Driver Detection event %d"
2404 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2405 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2406 	else if (vf_mdd_detected && !pf_mdd_detected)
2407 		device_printf(dev,
2408 		    "Malicious Driver Detection event %d"
2409 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2410 		    event, queue, pf_num, vf_num, vp_mdet_num);
2411 	else if (!vf_mdd_detected && pf_mdd_detected)
2412 		device_printf(dev,
2413 		    "Malicious Driver Detection event %d"
2414 		    " on TX queue %d, pf number %d (PF-%d)\n",
2415 		    event, queue, pf_num, pf_mdet_num);
2416 	/* Theoretically shouldn't happen */
2417 	else
2418 		device_printf(dev,
2419 		    "TX Malicious Driver Detection event (unknown)\n");
2420 }
2421 
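/*
 * Decode an RX Malicious Driver Detection event; this mirrors the TX
 * path above, except that GL_MDET_RX carries no VF number.
 */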
2422 static void
2423 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2424 {
2425 	struct i40e_hw *hw = &pf->hw;
2426 	device_t dev = pf->dev;
2427 	struct ixl_vf *vf;
2428 	bool mdd_detected = false;
2429 	bool pf_mdd_detected = false;
2430 	bool vf_mdd_detected = false;
2431 	u16 queue;
2432 	u8 pf_num, event;
2433 	u8 pf_mdet_num, vp_mdet_num;
2434 	u32 reg;
2435 
2436 	/*
2437 	 * GL_MDET_RX doesn't contain VF number information, unlike
2438 	 * GL_MDET_TX.
2439 	 */
2440 	reg = rd32(hw, I40E_GL_MDET_RX);
2441 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2442 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2443 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
2444 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2445 		    I40E_GL_MDET_RX_EVENT_SHIFT;
2446 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2447 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
2448 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2449 		mdd_detected = true;
2450 	}
2451 
2452 	if (!mdd_detected)
2453 		return;
2454 
2455 	reg = rd32(hw, I40E_PF_MDET_RX);
2456 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2457 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2458 		pf_mdet_num = hw->pf_id;
2459 		pf_mdd_detected = true;
2460 	}
2461 
2462 	/* Check if MDD was caused by a VF */
2463 	for (int i = 0; i < pf->num_vfs; i++) {
2464 		vf = &(pf->vfs[i]);
2465 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2466 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2467 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2468 			vp_mdet_num = i;
2469 			vf->num_mdd_events++;
2470 			vf_mdd_detected = true;
2471 		}
2472 	}
2473 
2474 	/* Print out an error message */
2475 	if (vf_mdd_detected && pf_mdd_detected)
2476 		device_printf(dev,
2477 		    "Malicious Driver Detection event %d"
2478 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2479 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2480 	else if (vf_mdd_detected && !pf_mdd_detected)
2481 		device_printf(dev,
2482 		    "Malicious Driver Detection event %d"
2483 		    " on RX queue %d, pf number %d, (VF-%d)\n",
2484 		    event, queue, pf_num, vp_mdet_num);
2485 	else if (!vf_mdd_detected && pf_mdd_detected)
2486 		device_printf(dev,
2487 		    "Malicious Driver Detection event %d"
2488 		    " on RX queue %d, pf number %d (PF-%d)\n",
2489 		    event, queue, pf_num, pf_mdet_num);
2490 	/* Theoretically shouldn't happen */
2491 	else
2492 		device_printf(dev,
2493 		    "RX Malicious Driver Detection event (unknown)\n");
2494 }
2495 
2496 /**
2497  * ixl_handle_mdd_event
2498  *
2499  * Called from the interrupt handler to identify possibly malicious VFs
2500  * (but it also detects events from the PF)
2501  **/
2502 void
2503 ixl_handle_mdd_event(struct ixl_pf *pf)
2504 {
2505 	struct i40e_hw *hw = &pf->hw;
2506 	u32 reg;
2507 
2508 	/*
2509 	 * Handle both TX/RX because it's possible they could
2510 	 * both trigger in the same interrupt.
2511 	 */
2512 	ixl_handle_tx_mdd_event(pf);
2513 	ixl_handle_rx_mdd_event(pf);
2514 
2515 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2516 
2517 	/* re-enable mdd interrupt cause */
2518 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2519 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2520 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2521 	ixl_flush(hw);
2522 }
2523 
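/*
 * Enable interrupts for the VSI: the per-queue interrupts when running
 * with MSI-X, or the "other" interrupt (intr0) otherwise.
 */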
2524 void
2525 ixl_enable_intr(struct ixl_vsi *vsi)
2526 {
2527 	struct i40e_hw		*hw = vsi->hw;
2528 	struct ixl_rx_queue	*que = vsi->rx_queues;
2529 
2530 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2531 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2532 			ixl_enable_queue(hw, que->rxr.me);
2533 	} else
2534 		ixl_enable_intr0(hw);
2535 }
2536 
2537 void
2538 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2539 {
2540 	struct i40e_hw		*hw = vsi->hw;
2541 	struct ixl_rx_queue	*que = vsi->rx_queues;
2542 
2543 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 		ixl_disable_queue(hw, que->rxr.me);
2545 }
2546 
2547 void
2548 ixl_enable_intr0(struct i40e_hw *hw)
2549 {
2550 	u32		reg;
2551 
2552 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2553 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2554 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2555 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2556 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2557 }
2558 
2559 void
2560 ixl_disable_intr0(struct i40e_hw *hw)
2561 {
2562 	u32		reg;
2563 
2564 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2565 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2566 	ixl_flush(hw);
2567 }
2568 
2569 void
2570 ixl_enable_queue(struct i40e_hw *hw, int id)
2571 {
2572 	u32		reg;
2573 
2574 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2575 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2576 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2577 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2578 }
2579 
2580 void
2581 ixl_disable_queue(struct i40e_hw *hw, int id)
2582 {
2583 	u32		reg;
2584 
2585 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2586 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2587 }
2588 
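/*
 * Read the port-level hardware counters into pf->stats, using the values
 * captured when the offsets were first loaded so that the reported stats
 * count from zero; also refreshes the VSI and enabled-VF statistics.
 */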
2589 void
2590 ixl_update_stats_counters(struct ixl_pf *pf)
2591 {
2592 	struct i40e_hw	*hw = &pf->hw;
2593 	struct ixl_vsi	*vsi = &pf->vsi;
2594 	struct ixl_vf	*vf;
2595 	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2596 
2597 	struct i40e_hw_port_stats *nsd = &pf->stats;
2598 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2599 
2600 	/* Update hw stats */
2601 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2602 			   pf->stat_offsets_loaded,
2603 			   &osd->crc_errors, &nsd->crc_errors);
2604 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2605 			   pf->stat_offsets_loaded,
2606 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2607 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2608 			   I40E_GLPRT_GORCL(hw->port),
2609 			   pf->stat_offsets_loaded,
2610 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2611 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2612 			   I40E_GLPRT_GOTCL(hw->port),
2613 			   pf->stat_offsets_loaded,
2614 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2615 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2616 			   pf->stat_offsets_loaded,
2617 			   &osd->eth.rx_discards,
2618 			   &nsd->eth.rx_discards);
2619 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2620 			   I40E_GLPRT_UPRCL(hw->port),
2621 			   pf->stat_offsets_loaded,
2622 			   &osd->eth.rx_unicast,
2623 			   &nsd->eth.rx_unicast);
2624 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2625 			   I40E_GLPRT_UPTCL(hw->port),
2626 			   pf->stat_offsets_loaded,
2627 			   &osd->eth.tx_unicast,
2628 			   &nsd->eth.tx_unicast);
2629 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2630 			   I40E_GLPRT_MPRCL(hw->port),
2631 			   pf->stat_offsets_loaded,
2632 			   &osd->eth.rx_multicast,
2633 			   &nsd->eth.rx_multicast);
2634 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2635 			   I40E_GLPRT_MPTCL(hw->port),
2636 			   pf->stat_offsets_loaded,
2637 			   &osd->eth.tx_multicast,
2638 			   &nsd->eth.tx_multicast);
2639 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2640 			   I40E_GLPRT_BPRCL(hw->port),
2641 			   pf->stat_offsets_loaded,
2642 			   &osd->eth.rx_broadcast,
2643 			   &nsd->eth.rx_broadcast);
2644 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2645 			   I40E_GLPRT_BPTCL(hw->port),
2646 			   pf->stat_offsets_loaded,
2647 			   &osd->eth.tx_broadcast,
2648 			   &nsd->eth.tx_broadcast);
2649 
2650 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2651 			   pf->stat_offsets_loaded,
2652 			   &osd->tx_dropped_link_down,
2653 			   &nsd->tx_dropped_link_down);
2654 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2655 			   pf->stat_offsets_loaded,
2656 			   &osd->mac_local_faults,
2657 			   &nsd->mac_local_faults);
2658 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2659 			   pf->stat_offsets_loaded,
2660 			   &osd->mac_remote_faults,
2661 			   &nsd->mac_remote_faults);
2662 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2663 			   pf->stat_offsets_loaded,
2664 			   &osd->rx_length_errors,
2665 			   &nsd->rx_length_errors);
2666 
2667 	/* Flow control (LFC) stats */
2668 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2669 			   pf->stat_offsets_loaded,
2670 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2671 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2672 			   pf->stat_offsets_loaded,
2673 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2674 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2675 			   pf->stat_offsets_loaded,
2676 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2677 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2678 			   pf->stat_offsets_loaded,
2679 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2680 
2681 	/*
2682 	 * For watchdog management we need to know if we have been paused
2683 	 * during the last interval, so capture that here.
2684 	 */
2685 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2686 		vsi->shared->isc_pause_frames = 1;
2687 
2688 	/* Packet size stats rx */
2689 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2690 			   I40E_GLPRT_PRC64L(hw->port),
2691 			   pf->stat_offsets_loaded,
2692 			   &osd->rx_size_64, &nsd->rx_size_64);
2693 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2694 			   I40E_GLPRT_PRC127L(hw->port),
2695 			   pf->stat_offsets_loaded,
2696 			   &osd->rx_size_127, &nsd->rx_size_127);
2697 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2698 			   I40E_GLPRT_PRC255L(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->rx_size_255, &nsd->rx_size_255);
2701 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2702 			   I40E_GLPRT_PRC511L(hw->port),
2703 			   pf->stat_offsets_loaded,
2704 			   &osd->rx_size_511, &nsd->rx_size_511);
2705 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2706 			   I40E_GLPRT_PRC1023L(hw->port),
2707 			   pf->stat_offsets_loaded,
2708 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2709 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2710 			   I40E_GLPRT_PRC1522L(hw->port),
2711 			   pf->stat_offsets_loaded,
2712 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2713 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2714 			   I40E_GLPRT_PRC9522L(hw->port),
2715 			   pf->stat_offsets_loaded,
2716 			   &osd->rx_size_big, &nsd->rx_size_big);
2717 
2718 	/* Packet size stats tx */
2719 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2720 			   I40E_GLPRT_PTC64L(hw->port),
2721 			   pf->stat_offsets_loaded,
2722 			   &osd->tx_size_64, &nsd->tx_size_64);
2723 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2724 			   I40E_GLPRT_PTC127L(hw->port),
2725 			   pf->stat_offsets_loaded,
2726 			   &osd->tx_size_127, &nsd->tx_size_127);
2727 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2728 			   I40E_GLPRT_PTC255L(hw->port),
2729 			   pf->stat_offsets_loaded,
2730 			   &osd->tx_size_255, &nsd->tx_size_255);
2731 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2732 			   I40E_GLPRT_PTC511L(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->tx_size_511, &nsd->tx_size_511);
2735 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2736 			   I40E_GLPRT_PTC1023L(hw->port),
2737 			   pf->stat_offsets_loaded,
2738 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2739 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2740 			   I40E_GLPRT_PTC1522L(hw->port),
2741 			   pf->stat_offsets_loaded,
2742 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2743 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2744 			   I40E_GLPRT_PTC9522L(hw->port),
2745 			   pf->stat_offsets_loaded,
2746 			   &osd->tx_size_big, &nsd->tx_size_big);
2747 
2748 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2749 			   pf->stat_offsets_loaded,
2750 			   &osd->rx_undersize, &nsd->rx_undersize);
2751 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2752 			   pf->stat_offsets_loaded,
2753 			   &osd->rx_fragments, &nsd->rx_fragments);
2754 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2755 			   pf->stat_offsets_loaded,
2756 			   &osd->rx_oversize, &nsd->rx_oversize);
2757 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2758 			   pf->stat_offsets_loaded,
2759 			   &osd->rx_jabber, &nsd->rx_jabber);
2760 	pf->stat_offsets_loaded = true;
2761 	/* End hw stats */
2762 
2763 	/* Update vsi stats */
2764 	ixl_update_vsi_stats(vsi);
2765 
2766 	for (int i = 0; i < pf->num_vfs; i++) {
2767 		vf = &pf->vfs[i];
2768 		if (vf->vf_flags & VF_FLAG_ENABLED)
2769 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2770 	}
2771 }
2772 
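/*
 * Quiesce the hardware before a reset: shut down the LAN HMC and the
 * Admin Queue, mask the "other" interrupt and release the PF's queue
 * allocations.
 */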
2773 int
2774 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2775 {
2776 	struct i40e_hw *hw = &pf->hw;
2777 	device_t dev = pf->dev;
2778 	int error = 0;
2779 
2780 	error = i40e_shutdown_lan_hmc(hw);
2781 	if (error)
2782 		device_printf(dev,
2783 		    "Shutdown LAN HMC failed with code %d\n", error);
2784 
2785 	ixl_disable_intr0(hw);
2786 
2787 	error = i40e_shutdown_adminq(hw);
2788 	if (error)
2789 		device_printf(dev,
2790 		    "Shutdown Admin queue failed with code %d\n", error);
2791 
2792 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2793 	return (error);
2794 }
2795 
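/*
 * Re-initialize the hardware after a PF reset: bring the Admin Queue,
 * HMC, switch configuration, flow control and interrupt setup back to
 * a usable state.
 */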
2796 int
2797 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2798 {
2799 	struct i40e_hw *hw = &pf->hw;
2800 	struct ixl_vsi *vsi = &pf->vsi;
2801 	device_t dev = pf->dev;
2802 	int error = 0;
2803 
2804 	device_printf(dev, "Rebuilding driver state...\n");
2805 
2806 	error = i40e_pf_reset(hw);
2807 	if (error) {
2808 		device_printf(dev, "PF reset failure %s\n",
2809 		    i40e_stat_str(hw, error));
2810 		goto ixl_rebuild_hw_structs_after_reset_err;
2811 	}
2812 
2813 	/* Setup */
2814 	error = i40e_init_adminq(hw);
2815 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2816 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2817 		    error);
2818 		goto ixl_rebuild_hw_structs_after_reset_err;
2819 	}
2820 
2821 	i40e_clear_pxe_mode(hw);
2822 
2823 	error = ixl_get_hw_capabilities(pf);
2824 	if (error) {
2825 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2826 		goto ixl_rebuild_hw_structs_after_reset_err;
2827 	}
2828 
2829 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2830 	    hw->func_caps.num_rx_qp, 0, 0);
2831 	if (error) {
2832 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2833 		goto ixl_rebuild_hw_structs_after_reset_err;
2834 	}
2835 
2836 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2837 	if (error) {
2838 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2839 		goto ixl_rebuild_hw_structs_after_reset_err;
2840 	}
2841 
2842 	/* reserve a contiguous allocation for the PF's VSI */
2843 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2844 	if (error) {
2845 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2846 		    error);
2847 		/* TODO: error handling */
2848 	}
2849 
2850 	error = ixl_switch_config(pf);
2851 	if (error) {
2852 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2853 		     error);
2854 		error = EIO;
2855 		goto ixl_rebuild_hw_structs_after_reset_err;
2856 	}
2857 
2858 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2859 	    NULL);
2860 	if (error) {
2861 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
2862 		    " aq_err %d\n", error, hw->aq.asq_last_status);
2863 		error = EIO;
2864 		goto ixl_rebuild_hw_structs_after_reset_err;
2865 	}
2866 
2867 	u8 set_fc_err_mask;
2868 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
2869 	if (error) {
2870 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
2871 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2872 		error = EIO;
2873 		goto ixl_rebuild_hw_structs_after_reset_err;
2874 	}
2875 
2876 	/* Remove default filters reinstalled by FW on reset */
2877 	ixl_del_default_hw_filters(vsi);
2878 
2879 	/* Determine link state */
2880 	if (ixl_attach_get_link_status(pf)) {
2881 		error = EINVAL;
2882 		/* TODO: error handling */
2883 	}
2884 
2885 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2886 	ixl_get_fw_lldp_status(pf);
2887 
2888 	/* Keep admin queue interrupts active while driver is loaded */
2889 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2890 		ixl_configure_intr0_msix(pf);
2891 		ixl_enable_intr0(hw);
2892 	}
2893 
2894 	device_printf(dev, "Rebuilding driver state done.\n");
2895 	return (0);
2896 
2897 ixl_rebuild_hw_structs_after_reset_err:
2898 	device_printf(dev, "Reload the driver to recover\n");
2899 	return (error);
2900 }
2901 
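/*
 * Handle an EMP reset: quiesce the driver, wait (up to 10 seconds) for
 * the device to come out of reset, then rebuild the driver state.
 */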
2902 void
2903 ixl_handle_empr_reset(struct ixl_pf *pf)
2904 {
2905 	struct ixl_vsi	*vsi = &pf->vsi;
2906 	struct i40e_hw	*hw = &pf->hw;
2907 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2908 	int count = 0;
2909 	u32 reg;
2910 
2911 	ixl_prepare_for_reset(pf, is_up);
2912 
2913 	/* Typically finishes within 3-4 seconds */
2914 	while (count++ < 100) {
2915 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2916 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2917 		if (reg)
2918 			i40e_msec_delay(100);
2919 		else
2920 			break;
2921 	}
2922 	ixl_dbg(pf, IXL_DBG_INFO,
2923 			"Reset wait count: %d\n", count);
2924 
2925 	ixl_rebuild_hw_structs_after_reset(pf);
2926 
2927 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2928 }
2929 
2930 /**
2931  * Update VSI-specific ethernet statistics counters.
2932  **/
2933 void
2934 ixl_update_eth_stats(struct ixl_vsi *vsi)
2935 {
2936 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2937 	struct i40e_hw *hw = &pf->hw;
2938 	struct i40e_eth_stats *es;
2939 	struct i40e_eth_stats *oes;
2940 	struct i40e_hw_port_stats *nsd;
2941 	u16 stat_idx = vsi->info.stat_counter_idx;
2942 
2943 	es = &vsi->eth_stats;
2944 	oes = &vsi->eth_stats_offsets;
2945 	nsd = &pf->stats;
2946 
2947 	/* Gather up the stats that the hw collects */
2948 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2949 			   vsi->stat_offsets_loaded,
2950 			   &oes->tx_errors, &es->tx_errors);
2951 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2952 			   vsi->stat_offsets_loaded,
2953 			   &oes->rx_discards, &es->rx_discards);
2954 
2955 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2956 			   I40E_GLV_GORCL(stat_idx),
2957 			   vsi->stat_offsets_loaded,
2958 			   &oes->rx_bytes, &es->rx_bytes);
2959 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2960 			   I40E_GLV_UPRCL(stat_idx),
2961 			   vsi->stat_offsets_loaded,
2962 			   &oes->rx_unicast, &es->rx_unicast);
2963 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2964 			   I40E_GLV_MPRCL(stat_idx),
2965 			   vsi->stat_offsets_loaded,
2966 			   &oes->rx_multicast, &es->rx_multicast);
2967 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2968 			   I40E_GLV_BPRCL(stat_idx),
2969 			   vsi->stat_offsets_loaded,
2970 			   &oes->rx_broadcast, &es->rx_broadcast);
2971 
2972 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2973 			   I40E_GLV_GOTCL(stat_idx),
2974 			   vsi->stat_offsets_loaded,
2975 			   &oes->tx_bytes, &es->tx_bytes);
2976 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2977 			   I40E_GLV_UPTCL(stat_idx),
2978 			   vsi->stat_offsets_loaded,
2979 			   &oes->tx_unicast, &es->tx_unicast);
2980 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2981 			   I40E_GLV_MPTCL(stat_idx),
2982 			   vsi->stat_offsets_loaded,
2983 			   &oes->tx_multicast, &es->tx_multicast);
2984 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2985 			   I40E_GLV_BPTCL(stat_idx),
2986 			   vsi->stat_offsets_loaded,
2987 			   &oes->tx_broadcast, &es->tx_broadcast);
2988 	vsi->stat_offsets_loaded = true;
2989 }
2990 
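/*
 * Refresh the VSI's ethernet statistics and fold them, together with
 * the relevant port counters, into the ifnet counters.
 */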
2991 void
2992 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2993 {
2994 	struct ixl_pf		*pf;
2995 	struct ifnet		*ifp;
2996 	struct i40e_eth_stats	*es;
2997 	u64			tx_discards;
2998 
2999 	struct i40e_hw_port_stats *nsd;
3000 
3001 	pf = vsi->back;
3002 	ifp = vsi->ifp;
3003 	es = &vsi->eth_stats;
3004 	nsd = &pf->stats;
3005 
3006 	ixl_update_eth_stats(vsi);
3007 
3008 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3009 
3010 	/* Update ifnet stats */
3011 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3012 	                   es->rx_multicast +
3013 			   es->rx_broadcast);
3014 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3015 	                   es->tx_multicast +
3016 			   es->tx_broadcast);
3017 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3018 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3019 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3020 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3021 
3022 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3023 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3024 	    nsd->rx_jabber);
3025 	IXL_SET_OERRORS(vsi, es->tx_errors);
3026 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3027 	IXL_SET_OQDROPS(vsi, tx_discards);
3028 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3029 	IXL_SET_COLLISIONS(vsi, 0);
3030 }
3031 
3032 /**
3033  * Reset all of the stats for the given pf
3034  **/
3035 void
3036 ixl_pf_reset_stats(struct ixl_pf *pf)
3037 {
3038 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3039 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3040 	pf->stat_offsets_loaded = false;
3041 }
3042 
3043 /**
3044  * Resets all stats of the given vsi
3045  **/
3046 void
3047 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3048 {
3049 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3050 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3051 	vsi->stat_offsets_loaded = false;
3052 }
3053 
3054 /**
3055  * Read and update a 48 bit stat from the hw
3056  *
3057  * Since the device stats are not reset on PF reset, they likely will not
3058  * be zeroed when the driver starts.  We'll save the first values read
3059  * and use them as offsets to be subtracted from the raw values in order
3060  * to report stats that count from zero.
3061  **/
3062 void
3063 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3064 	bool offset_loaded, u64 *offset, u64 *stat)
3065 {
3066 	u64 new_data;
3067 
3068 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3069 	new_data = rd64(hw, loreg);
3070 #else
3071 	/*
3072 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3073 	 * 10 don't support 64-bit bus reads/writes.
3074 	 */
3075 	new_data = rd32(hw, loreg);
3076 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3077 #endif
3078 
3079 	if (!offset_loaded)
3080 		*offset = new_data;
3081 	if (new_data >= *offset)
3082 		*stat = new_data - *offset;
3083 	else
3084 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3085 	*stat &= 0xFFFFFFFFFFFFULL;
3086 }
3087 
3088 /**
3089  * Read and update a 32 bit stat from the hw
3090  **/
3091 void
3092 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3093 	bool offset_loaded, u64 *offset, u64 *stat)
3094 {
3095 	u32 new_data;
3096 
3097 	new_data = rd32(hw, reg);
3098 	if (!offset_loaded)
3099 		*offset = new_data;
3100 	if (new_data >= *offset)
3101 		*stat = (u32)(new_data - *offset);
3102 	else
3103 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3104 }
3105 
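/*
 * Create the device's sysctl tree: link and flow control knobs, FEC
 * controls on 25G devices, and a hidden "debug" node holding the
 * diagnostic sysctls.
 */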
3106 void
3107 ixl_add_device_sysctls(struct ixl_pf *pf)
3108 {
3109 	device_t dev = pf->dev;
3110 	struct i40e_hw *hw = &pf->hw;
3111 
3112 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3113 	struct sysctl_oid_list *ctx_list =
3114 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3115 
3116 	struct sysctl_oid *debug_node;
3117 	struct sysctl_oid_list *debug_list;
3118 
3119 	struct sysctl_oid *fec_node;
3120 	struct sysctl_oid_list *fec_list;
3121 
3122 	/* Set up sysctls */
3123 	SYSCTL_ADD_PROC(ctx, ctx_list,
3124 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3125 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3126 
3127 	SYSCTL_ADD_PROC(ctx, ctx_list,
3128 	    OID_AUTO, "advertise_speed",
3129 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3130 	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3131 
3132 	SYSCTL_ADD_PROC(ctx, ctx_list,
3133 	    OID_AUTO, "supported_speeds",
3134 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3135 	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3136 
3137 	SYSCTL_ADD_PROC(ctx, ctx_list,
3138 	    OID_AUTO, "current_speed",
3139 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3140 	    ixl_sysctl_current_speed, "A", "Current Port Speed");
3141 
3142 	SYSCTL_ADD_PROC(ctx, ctx_list,
3143 	    OID_AUTO, "fw_version",
3144 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3145 	    ixl_sysctl_show_fw, "A", "Firmware version");
3146 
3147 	SYSCTL_ADD_PROC(ctx, ctx_list,
3148 	    OID_AUTO, "unallocated_queues",
3149 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
3150 	    ixl_sysctl_unallocated_queues, "I",
3151 	    "Queues not allocated to a PF or VF");
3152 
3153 	SYSCTL_ADD_PROC(ctx, ctx_list,
3154 	    OID_AUTO, "tx_itr",
3155 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3156 	    ixl_sysctl_pf_tx_itr, "I",
3157 	    "Immediately set TX ITR value for all queues");
3158 
3159 	SYSCTL_ADD_PROC(ctx, ctx_list,
3160 	    OID_AUTO, "rx_itr",
3161 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3162 	    ixl_sysctl_pf_rx_itr, "I",
3163 	    "Immediately set RX ITR value for all queues");
3164 
3165 	SYSCTL_ADD_INT(ctx, ctx_list,
3166 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3167 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3168 
3169 	SYSCTL_ADD_INT(ctx, ctx_list,
3170 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3171 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3172 
3173 	/* Add FEC sysctls for 25G adapters */
3174 	if (i40e_is_25G_device(hw->device_id)) {
3175 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3176 		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3177 		    "FEC Sysctls");
3178 		fec_list = SYSCTL_CHILDREN(fec_node);
3179 
3180 		SYSCTL_ADD_PROC(ctx, fec_list,
3181 		    OID_AUTO, "fc_ability",
3182 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3183 		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3184 
3185 		SYSCTL_ADD_PROC(ctx, fec_list,
3186 		    OID_AUTO, "rs_ability",
3187 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3188 		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3189 
3190 		SYSCTL_ADD_PROC(ctx, fec_list,
3191 		    OID_AUTO, "fc_requested",
3192 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3193 		    ixl_sysctl_fec_fc_request, "I",
3194 		    "FC FEC mode requested on link");
3195 
3196 		SYSCTL_ADD_PROC(ctx, fec_list,
3197 		    OID_AUTO, "rs_requested",
3198 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3199 		    ixl_sysctl_fec_rs_request, "I",
3200 		    "RS FEC mode requested on link");
3201 
3202 		SYSCTL_ADD_PROC(ctx, fec_list,
3203 		    OID_AUTO, "auto_fec_enabled",
3204 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
3205 		    ixl_sysctl_fec_auto_enable, "I",
3206 		    "Let FW decide FEC ability/request modes");
3207 	}
3208 
3209 	SYSCTL_ADD_PROC(ctx, ctx_list,
3210 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3211 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3212 
3213 	/* Add sysctls meant to print debug information, but don't list them
3214 	 * in "sysctl -a" output. */
3215 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3216 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
3217 	    "Debug Sysctls");
3218 	debug_list = SYSCTL_CHILDREN(debug_node);
3219 
3220 	SYSCTL_ADD_UINT(ctx, debug_list,
3221 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3222 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3223 
3224 	SYSCTL_ADD_UINT(ctx, debug_list,
3225 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3226 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3227 
3228 	SYSCTL_ADD_PROC(ctx, debug_list,
3229 	    OID_AUTO, "link_status",
3230 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3231 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3232 
3233 	SYSCTL_ADD_PROC(ctx, debug_list,
3234 	    OID_AUTO, "phy_abilities",
3235 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3236 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3237 
3238 	SYSCTL_ADD_PROC(ctx, debug_list,
3239 	    OID_AUTO, "filter_list",
3240 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3241 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3242 
3243 	SYSCTL_ADD_PROC(ctx, debug_list,
3244 	    OID_AUTO, "hw_res_alloc",
3245 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3246 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3247 
3248 	SYSCTL_ADD_PROC(ctx, debug_list,
3249 	    OID_AUTO, "switch_config",
3250 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3251 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3252 
3253 	SYSCTL_ADD_PROC(ctx, debug_list,
3254 	    OID_AUTO, "rss_key",
3255 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3256 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3257 
3258 	SYSCTL_ADD_PROC(ctx, debug_list,
3259 	    OID_AUTO, "rss_lut",
3260 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3261 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3262 
3263 	SYSCTL_ADD_PROC(ctx, debug_list,
3264 	    OID_AUTO, "rss_hena",
3265 	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3266 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3267 
3268 	SYSCTL_ADD_PROC(ctx, debug_list,
3269 	    OID_AUTO, "disable_fw_link_management",
3270 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3271 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3272 
3273 	SYSCTL_ADD_PROC(ctx, debug_list,
3274 	    OID_AUTO, "dump_debug_data",
3275 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3276 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3277 
3278 	SYSCTL_ADD_PROC(ctx, debug_list,
3279 	    OID_AUTO, "do_pf_reset",
3280 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3281 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3282 
3283 	SYSCTL_ADD_PROC(ctx, debug_list,
3284 	    OID_AUTO, "do_core_reset",
3285 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3286 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3287 
3288 	SYSCTL_ADD_PROC(ctx, debug_list,
3289 	    OID_AUTO, "do_global_reset",
3290 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3291 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3292 
3293 	SYSCTL_ADD_PROC(ctx, debug_list,
3294 	    OID_AUTO, "do_emp_reset",
3295 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
3296 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3297 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3298 
3299 	SYSCTL_ADD_PROC(ctx, debug_list,
3300 	    OID_AUTO, "queue_interrupt_table",
3301 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3302 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3303 
3304 	if (pf->has_i2c) {
3305 		SYSCTL_ADD_PROC(ctx, debug_list,
3306 		    OID_AUTO, "read_i2c_byte",
3307 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3308 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3309 
3310 		SYSCTL_ADD_PROC(ctx, debug_list,
3311 		    OID_AUTO, "write_i2c_byte",
3312 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3313 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3314 
3315 		SYSCTL_ADD_PROC(ctx, debug_list,
3316 		    OID_AUTO, "read_i2c_diag_data",
3317 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3318 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3319 	}
3320 }
3321 
3322 /*
3323  * Primarily for finding out how many queues can be assigned to VFs,
3324  * at runtime.
3325  */
3326 static int
3327 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3328 {
3329 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3330 	int queues;
3331 
3332 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3333 
3334 	return sysctl_handle_int(oidp, NULL, queues, req);
3335 }
3336 
3337 /*
3338 ** Set flow control using sysctl:
3339 ** 	0 - off
3340 **	1 - rx pause
3341 **	2 - tx pause
3342 **	3 - full
3343 */
3344 int
3345 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3346 {
3347 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3348 	struct i40e_hw *hw = &pf->hw;
3349 	device_t dev = pf->dev;
3350 	int requested_fc, error = 0;
3351 	enum i40e_status_code aq_error = 0;
3352 	u8 fc_aq_err = 0;
3353 
3354 	/* Get request */
3355 	requested_fc = pf->fc;
3356 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3357 	if ((error) || (req->newptr == NULL))
3358 		return (error);
3359 	if (requested_fc < 0 || requested_fc > 3) {
3360 		device_printf(dev,
3361 		    "Invalid fc mode; valid modes are 0 through 3\n");
3362 		return (EINVAL);
3363 	}
3364 
3365 	/* Set fc ability for port */
3366 	hw->fc.requested_mode = requested_fc;
3367 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3368 	if (aq_error) {
3369 		device_printf(dev,
3370 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3371 		    __func__, aq_error, fc_aq_err);
3372 		return (EIO);
3373 	}
3374 	pf->fc = requested_fc;
3375 
3376 	return (0);
3377 }
3378 
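/*
 * Convert an Admin Queue link speed value to a printable string.
 */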
3379 char *
3380 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3381 {
3382 	int index;
3383 
3384 	char *speeds[] = {
3385 		"Unknown",
3386 		"100 Mbps",
3387 		"1 Gbps",
3388 		"10 Gbps",
3389 		"40 Gbps",
3390 		"20 Gbps",
3391 		"25 Gbps",
3392 	};
3393 
3394 	switch (link_speed) {
3395 	case I40E_LINK_SPEED_100MB:
3396 		index = 1;
3397 		break;
3398 	case I40E_LINK_SPEED_1GB:
3399 		index = 2;
3400 		break;
3401 	case I40E_LINK_SPEED_10GB:
3402 		index = 3;
3403 		break;
3404 	case I40E_LINK_SPEED_40GB:
3405 		index = 4;
3406 		break;
3407 	case I40E_LINK_SPEED_20GB:
3408 		index = 5;
3409 		break;
3410 	case I40E_LINK_SPEED_25GB:
3411 		index = 6;
3412 		break;
3413 	case I40E_LINK_SPEED_UNKNOWN:
3414 	default:
3415 		index = 0;
3416 		break;
3417 	}
3418 
3419 	return speeds[index];
3420 }
3421 
3422 int
3423 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3424 {
3425 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3426 	struct i40e_hw *hw = &pf->hw;
3427 	int error = 0;
3428 
3429 	ixl_update_link_status(pf);
3430 
3431 	error = sysctl_handle_string(oidp,
3432 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3433 	    8, req);
3434 	return (error);
3435 }
3436 
3437 /*
3438  * Converts an 8-bit link speed flags value between the sysctl
3439  * representation and the Admin Queue representation.
3440  */
3441 static u8
3442 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3443 {
3444 	static u16 speedmap[6] = {
3445 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3446 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3447 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3448 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3449 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3450 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3451 	};
3452 	u8 retval = 0;
3453 
3454 	for (int i = 0; i < 6; i++) {
3455 		if (to_aq)
3456 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3457 		else
3458 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3459 	}
3460 
3461 	return (retval);
3462 }
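
/*
 * Worked example for the conversion above: each speedmap entry carries
 * the AQ flag in its low byte and the matching sysctl flag in its high
 * byte, so for sysctl flags 0x6 (1G | 10G):
 *
 *	ixl_convert_sysctl_aq_link_speed(0x6, true)
 *	    == (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB)
 *
 * and converting that result back with to_aq == false yields 0x6 again.
 */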
3463 
3464 int
3465 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3466 {
3467 	struct i40e_hw *hw = &pf->hw;
3468 	device_t dev = pf->dev;
3469 	struct i40e_aq_get_phy_abilities_resp abilities;
3470 	struct i40e_aq_set_phy_config config;
3471 	enum i40e_status_code aq_error = 0;
3472 
3473 	/* Get current capability information */
3474 	aq_error = i40e_aq_get_phy_capabilities(hw,
3475 	    FALSE, FALSE, &abilities, NULL);
3476 	if (aq_error) {
3477 		device_printf(dev,
3478 		    "%s: Error getting phy capabilities %d,"
3479 		    " aq error: %d\n", __func__, aq_error,
3480 		    hw->aq.asq_last_status);
3481 		return (EIO);
3482 	}
3483 
3484 	/* Prepare new config */
3485 	bzero(&config, sizeof(config));
3486 	if (from_aq)
3487 		config.link_speed = speeds;
3488 	else
3489 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3490 	config.phy_type = abilities.phy_type;
3491 	config.phy_type_ext = abilities.phy_type_ext;
3492 	config.abilities = abilities.abilities
3493 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3494 	config.eee_capability = abilities.eee_capability;
3495 	config.eeer = abilities.eeer_val;
3496 	config.low_power_ctrl = abilities.d3_lpan;
3497 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3498 
3499 	/* Do aq command & restart link */
3500 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3501 	if (aq_error) {
3502 		device_printf(dev,
3503 		    "%s: Error setting new phy config %d,"
3504 		    " aq error: %d\n", __func__, aq_error,
3505 		    hw->aq.asq_last_status);
3506 		return (EIO);
3507 	}
3508 
3509 	return (0);
3510 }
3511 
3512 /*
3513 ** Supported link speeds:
3514 **	Flags:
3515 **	 0x1 - 100 Mb
3516 **	 0x2 - 1G
3517 **	 0x4 - 10G
3518 **	 0x8 - 20G
3519 **	0x10 - 25G
3520 **	0x20 - 40G
3521 */
3522 static int
3523 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3524 {
3525 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3526 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3527 
3528 	return sysctl_handle_int(oidp, NULL, supported, req);
3529 }
3530 
3531 /*
3532 ** Control link advertise speed:
3533 **	Flags:
3534 **	 0x1 - advertise 100 Mb
3535 **	 0x2 - advertise 1G
3536 **	 0x4 - advertise 10G
3537 **	 0x8 - advertise 20G
3538 **	0x10 - advertise 25G
3539 **	0x20 - advertise 40G
3540 **
3541 **	Set to 0 to disable link
3542 */
3543 int
3544 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3545 {
3546 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3547 	device_t dev = pf->dev;
3548 	u8 converted_speeds;
3549 	int requested_ls = 0;
3550 	int error = 0;
3551 
3552 	/* Read in new mode */
3553 	requested_ls = pf->advertised_speed;
3554 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3555 	if ((error) || (req->newptr == NULL))
3556 		return (error);
3557 
3558 	/* Error out if bits outside of possible flag range are set */
3559 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3560 		device_printf(dev, "Input advertised speed out of range; "
3561 		    "valid flags are: 0x%02x\n",
3562 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3563 		return (EINVAL);
3564 	}
3565 
3566 	/* Check if adapter supports input value */
3567 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3568 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3569 		device_printf(dev, "Invalid advertised speed; "
3570 		    "valid flags are: 0x%02x\n",
3571 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3572 		return (EINVAL);
3573 	}
3574 
3575 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3576 	if (error)
3577 		return (error);
3578 
3579 	pf->advertised_speed = requested_ls;
3580 	ixl_update_link_status(pf);
3581 	return (0);
3582 }
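
/*
 * Usage sketch for the handler above (hypothetical OID path; assumes the
 * handler is attached as an "advertise_speed" node):
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x6	advertise only 1G and 10G
 *
 * The new value must be a subset of the flags reported by the supported
 * speeds sysctl, or the handler returns EINVAL; 0 disables link.
 */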
3583 
3584 /*
3585 ** Get the width and transaction speed of
3586 ** the bus this adapter is plugged into.
3587 */
3588 void
3589 ixl_get_bus_info(struct ixl_pf *pf)
3590 {
3591 	struct i40e_hw *hw = &pf->hw;
3592 	device_t dev = pf->dev;
3593 	u16 link;
3594 	u32 offset, num_ports;
3595 	u64 max_speed;
3596 
3597 	/* Some devices don't use PCIE */
3598 	if (hw->mac.type == I40E_MAC_X722)
3599 		return;
3600 
3601 	/* Read PCI Express Capabilities Link Status Register */
3602 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3603 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3604 
3605 	/* Fill out hw struct with PCIE info */
3606 	i40e_set_pci_config_data(hw, link);
3607 
3608 	/* Use info to print out bandwidth messages */
3609 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3610 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3611 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3612 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3613 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3614 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3615 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3616 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3617 	    ("Unknown"));
3618 
3619 	/*
3620 	 * If adapter is in slot with maximum supported speed,
3621 	 * no warning message needs to be printed out.
3622 	 */
3623 	if (hw->bus.speed >= i40e_bus_speed_8000
3624 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3625 		return;
3626 
3627 	num_ports = bitcount32(hw->func_caps.valid_functions);
3628 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3629 
3630 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3631 		device_printf(dev, "PCI-Express bandwidth available"
3632 		    " for this device may be insufficient for"
3633 		    " optimal performance.\n");
3634 		device_printf(dev, "Please move the device to a different"
3635 		    " PCI-e link with more lanes and/or higher"
3636 		    " transfer rate.\n");
3637 	}
3638 }
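
/*
 * Example of the bandwidth heuristic above, assuming the i40e_bus_speed_*
 * enum values equal the transfer rate in MT/s: a four-port 10G device
 * needs 4 * 10000 = 40000, which fits a PCIe 3.0 x8 slot
 * (8000 * 8 = 64000) but trips the warning at x4 (8000 * 4 = 32000).
 */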
3639 
3640 static int
3641 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3642 {
3643 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3644 	struct i40e_hw	*hw = &pf->hw;
3645 	struct sbuf	*sbuf;
3646 
3647 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
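	/* Mirror the allocation check used by the other sysctl handlers */
	if (sbuf == NULL) {
		device_printf(pf->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}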
3648 	ixl_nvm_version_str(hw, sbuf);
3649 	sbuf_finish(sbuf);
3650 	sbuf_delete(sbuf);
3651 
3652 	return (0);
3653 }
3654 
3655 void
3656 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3657 {
3658 	if ((nvma->command == I40E_NVM_READ) &&
3659 	    ((nvma->config & 0xFF) == 0xF) &&
3660 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3661 	    (nvma->offset == 0) &&
3662 	    (nvma->data_size == 1)) {
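		/*
		 * This pattern matches the NVM update tool's frequent
		 * "Get Driver Status" poll; it is left unlogged, apparently
		 * to avoid log spam (note the commented-out printf below).
		 */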
3663 		// device_printf(dev, "- Get Driver Status Command\n");
3664 	}
3665 	else if (nvma->command == I40E_NVM_READ) {
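		/* Other NVM reads are likewise left unlogged. */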
3666 
3667 	}
3668 	else {
3669 		switch (nvma->command) {
3670 		case 0xB:
3671 			device_printf(dev, "- command: I40E_NVM_READ\n");
3672 			break;
3673 		case 0xC:
3674 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3675 			break;
3676 		default:
3677 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3678 			break;
3679 		}
3680 
3681 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3682 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3683 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3684 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3685 	}
3686 }
3687 
3688 int
3689 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3690 {
3691 	struct i40e_hw *hw = &pf->hw;
3692 	struct i40e_nvm_access *nvma;
3693 	device_t dev = pf->dev;
3694 	enum i40e_status_code status = 0;
3695 	size_t nvma_size, ifd_len, exp_len;
3696 	int err, perrno;
3697 
3698 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3699 
3700 	/* Sanity checks */
3701 	nvma_size = sizeof(struct i40e_nvm_access);
3702 	ifd_len = ifd->ifd_len;
3703 
3704 	if (ifd_len < nvma_size ||
3705 	    ifd->ifd_data == NULL) {
3706 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3707 		    __func__);
3708 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3709 		    __func__, ifd_len, nvma_size);
3710 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3711 		    ifd->ifd_data);
3712 		return (EINVAL);
3713 	}
3714 
3715 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3716 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3717 	if (err) {
3718 		device_printf(dev, "%s: Cannot get request from user space\n",
3719 		    __func__);
3720 		free(nvma, M_DEVBUF);
3721 		return (err);
3722 	}
3723 
3724 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3725 		ixl_print_nvm_cmd(dev, nvma);
3726 
3727 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3728 		int count = 0;
3729 		while (count++ < 100) {
3730 			i40e_msec_delay(100);
3731 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3732 				break;
3733 		}
3734 	}
3735 
3736 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3737 		free(nvma, M_DEVBUF);
3738 		return (-EBUSY);
3739 	}
3740 
3741 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3742 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3743 		    __func__);
3744 		free(nvma, M_DEVBUF);
3745 		return (EINVAL);
3746 	}
3747 
3748 	/*
3749 	 * Older versions of the NVM update tool don't set ifd_len to the size
3750 	 * of the entire buffer passed to the ioctl. Check the data_size field
3751 	 * in the contained i40e_nvm_access struct and ensure everything is
3752 	 * copied in from userspace.
3753 	 */
3754 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3755 
3756 	if (ifd_len < exp_len) {
3757 		ifd_len = exp_len;
3758 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3759 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3760 		if (err) {
3761 			device_printf(dev, "%s: Cannot get request from user space\n",
3762 					__func__);
3763 			free(nvma, M_DEVBUF);
3764 			return (err);
3765 		}
3766 	}
3767 
3768 	// TODO: Might need a different lock here
3769 	// IXL_PF_LOCK(pf);
3770 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3771 	// IXL_PF_UNLOCK(pf);
3772 
3773 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3774 	free(nvma, M_DEVBUF);
3775 	if (err) {
3776 		device_printf(dev, "%s: Cannot return data to user space\n",
3777 				__func__);
3778 		return (err);
3779 	}
3780 
3781 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3782 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3783 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3784 		    i40e_stat_str(hw, status), perrno);
3785 
3786 	/*
3787 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
3788 	 * to run this ioctl again, so use -EACCES for -EPERM instead.
3789 	 */
3790 	if (perrno == -EPERM)
3791 		return (-EACCES);
3792 	else
3793 		return (perrno);
3794 }
3795 
3796 int
3797 ixl_find_i2c_interface(struct ixl_pf *pf)
3798 {
3799 	struct i40e_hw *hw = &pf->hw;
3800 	bool i2c_en, port_matched;
3801 	u32 reg;
3802 
3803 	for (int i = 0; i < 4; i++) {
3804 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3805 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3806 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3807 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3808 		    & BIT(hw->port);
3809 		if (i2c_en && port_matched)
3810 			return (i);
3811 	}
3812 
3813 	return (-1);
3814 }
3815 
3816 static char *
3817 ixl_phy_type_string(u32 bit_pos, bool ext)
3818 {
3819 	static char * phy_types_str[32] = {
3820 		"SGMII",
3821 		"1000BASE-KX",
3822 		"10GBASE-KX4",
3823 		"10GBASE-KR",
3824 		"40GBASE-KR4",
3825 		"XAUI",
3826 		"XFI",
3827 		"SFI",
3828 		"XLAUI",
3829 		"XLPPI",
3830 		"40GBASE-CR4",
3831 		"10GBASE-CR1",
3832 		"SFP+ Active DA",
3833 		"QSFP+ Active DA",
3834 		"Reserved (14)",
3835 		"Reserved (15)",
3836 		"Reserved (16)",
3837 		"100BASE-TX",
3838 		"1000BASE-T",
3839 		"10GBASE-T",
3840 		"10GBASE-SR",
3841 		"10GBASE-LR",
3842 		"10GBASE-SFP+Cu",
3843 		"10GBASE-CR1",
3844 		"40GBASE-CR4",
3845 		"40GBASE-SR4",
3846 		"40GBASE-LR4",
3847 		"1000BASE-SX",
3848 		"1000BASE-LX",
3849 		"1000BASE-T Optical",
3850 		"20GBASE-KR2",
3851 		"Reserved (31)"
3852 	};
3853 	static char * ext_phy_types_str[8] = {
3854 		"25GBASE-KR",
3855 		"25GBASE-CR",
3856 		"25GBASE-SR",
3857 		"25GBASE-LR",
3858 		"25GBASE-AOC",
3859 		"25GBASE-ACC",
3860 		"Reserved (6)",
3861 		"Reserved (7)"
3862 	};
3863 
3864 	if (ext && bit_pos > 7) return "Invalid_Ext";
3865 	if (bit_pos > 31) return "Invalid";
3866 
3867 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3868 }
3869 
3870 /* TODO: ERJ: I don't think this is necessary anymore. */
3871 int
3872 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3873 {
3874 	device_t dev = pf->dev;
3875 	struct i40e_hw *hw = &pf->hw;
3876 	struct i40e_aq_desc desc;
3877 	enum i40e_status_code status;
3878 
3879 	struct i40e_aqc_get_link_status *aq_link_status =
3880 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3881 
3882 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3883 	aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3884 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3885 	if (status) {
3886 		device_printf(dev,
3887 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3888 		    __func__, i40e_stat_str(hw, status),
3889 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3890 		return (EIO);
3891 	}
3892 
3893 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3894 	return (0);
3895 }
3896 
3897 static char *
3898 ixl_phy_type_string_ls(u8 val)
3899 {
3900 	if (val >= 0x1F)
3901 		return ixl_phy_type_string(val - 0x1F, true);
3902 	else
3903 		return ixl_phy_type_string(val, false);
3904 }
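
/*
 * Example: the link-status PHY type field packs the extended (25G) types
 * after the base types starting at 0x1F, so 0x20 resolves to extended
 * bit position 1 ("25GBASE-CR") while 0x05 stays in the base table
 * ("XAUI").
 */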
3905 
3906 static int
3907 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3908 {
3909 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3910 	device_t dev = pf->dev;
3911 	struct sbuf *buf;
3912 	int error = 0;
3913 
3914 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3915 	if (!buf) {
3916 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3917 		return (ENOMEM);
3918 	}
3919 
3920 	struct i40e_aqc_get_link_status link_status;
3921 	error = ixl_aq_get_link_status(pf, &link_status);
3922 	if (error) {
3923 		sbuf_delete(buf);
3924 		return (error);
3925 	}
3926 
3927 	sbuf_printf(buf, "\n"
3928 	    "PHY Type : 0x%02x<%s>\n"
3929 	    "Speed    : 0x%02x\n"
3930 	    "Link info: 0x%02x\n"
3931 	    "AN info  : 0x%02x\n"
3932 	    "Ext info : 0x%02x\n"
3933 	    "Loopback : 0x%02x\n"
3934 	    "Max Frame: %d\n"
3935 	    "Config   : 0x%02x\n"
3936 	    "Power    : 0x%02x",
3937 	    link_status.phy_type,
3938 	    ixl_phy_type_string_ls(link_status.phy_type),
3939 	    link_status.link_speed,
3940 	    link_status.link_info,
3941 	    link_status.an_info,
3942 	    link_status.ext_info,
3943 	    link_status.loopback,
3944 	    link_status.max_frame_size,
3945 	    link_status.config,
3946 	    link_status.power_desc);
3947 
3948 	error = sbuf_finish(buf);
3949 	if (error)
3950 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3951 
3952 	sbuf_delete(buf);
3953 	return (error);
3954 }
3955 
3956 static int
3957 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3958 {
3959 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3960 	struct i40e_hw *hw = &pf->hw;
3961 	device_t dev = pf->dev;
3962 	enum i40e_status_code status;
3963 	struct i40e_aq_get_phy_abilities_resp abilities;
3964 	struct sbuf *buf;
3965 	int error = 0;
3966 
3967 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3968 	if (!buf) {
3969 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3970 		return (ENOMEM);
3971 	}
3972 
3973 	status = i40e_aq_get_phy_capabilities(hw,
3974 	    FALSE, FALSE, &abilities, NULL);
3975 	if (status) {
3976 		device_printf(dev,
3977 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3978 		    __func__, i40e_stat_str(hw, status),
3979 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3980 		sbuf_delete(buf);
3981 		return (EIO);
3982 	}
3983 
3984 	sbuf_printf(buf, "\n"
3985 	    "PHY Type : %08x",
3986 	    abilities.phy_type);
3987 
3988 	if (abilities.phy_type != 0) {
3989 		sbuf_printf(buf, "<");
3990 		for (int i = 0; i < 32; i++)
3991 			if ((1 << i) & abilities.phy_type)
3992 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3993 		sbuf_printf(buf, ">\n");
3994 	}
3995 
3996 	sbuf_printf(buf, "PHY Ext  : %02x",
3997 	    abilities.phy_type_ext);
3998 
3999 	if (abilities.phy_type_ext != 0) {
4000 		sbuf_printf(buf, "<");
4001 		for (int i = 0; i < 4; i++)
4002 			if ((1 << i) & abilities.phy_type_ext)
4003 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
4004 		sbuf_printf(buf, ">");
4005 	}
4006 	sbuf_printf(buf, "\n");
4007 
4008 	sbuf_printf(buf,
4009 	    "Speed    : %02x\n"
4010 	    "Abilities: %02x\n"
4011 	    "EEE cap  : %04x\n"
4012 	    "EEER reg : %08x\n"
4013 	    "D3 Lpan  : %02x\n"
4014 	    "ID       : %02x %02x %02x %02x\n"
4015 	    "ModType  : %02x %02x %02x\n"
4016 	    "ModType E: %01x\n"
4017 	    "FEC Cfg  : %02x\n"
4018 	    "Ext CC   : %02x",
4019 	    abilities.link_speed,
4020 	    abilities.abilities, abilities.eee_capability,
4021 	    abilities.eeer_val, abilities.d3_lpan,
4022 	    abilities.phy_id[0], abilities.phy_id[1],
4023 	    abilities.phy_id[2], abilities.phy_id[3],
4024 	    abilities.module_type[0], abilities.module_type[1],
4025 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
4026 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
4027 	    abilities.ext_comp_code);
4028 
4029 	error = sbuf_finish(buf);
4030 	if (error)
4031 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4032 
4033 	sbuf_delete(buf);
4034 	return (error);
4035 }
4036 
4037 static int
4038 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4039 {
4040 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4041 	struct ixl_vsi *vsi = &pf->vsi;
4042 	struct ixl_mac_filter *f;
4043 	device_t dev = pf->dev;
4044 	int error = 0, ftl_len = 0, ftl_counter = 0;
4045 
4046 	struct sbuf *buf;
4047 
4048 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4049 	if (!buf) {
4050 		device_printf(dev, "Could not allocate sbuf for output.\n");
4051 		return (ENOMEM);
4052 	}
4053 
4054 	sbuf_printf(buf, "\n");
4055 
4056 	/* Print MAC filters */
4057 	sbuf_printf(buf, "PF Filters:\n");
4058 	SLIST_FOREACH(f, &vsi->ftl, next)
4059 		ftl_len++;
4060 
4061 	if (ftl_len < 1)
4062 		sbuf_printf(buf, "(none)\n");
4063 	else {
4064 		SLIST_FOREACH(f, &vsi->ftl, next) {
4065 			sbuf_printf(buf,
4066 			    MAC_FORMAT ", vlan %4d, flags %#06x",
4067 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4068 			/* don't print '\n' for last entry */
4069 			if (++ftl_counter != ftl_len)
4070 				sbuf_printf(buf, "\n");
4071 		}
4072 	}
4073 
4074 #ifdef PCI_IOV
4075 	/* TODO: Give each VF its own filter list sysctl */
4076 	struct ixl_vf *vf;
4077 	if (pf->num_vfs > 0) {
4078 		sbuf_printf(buf, "\n\n");
4079 		for (int i = 0; i < pf->num_vfs; i++) {
4080 			vf = &pf->vfs[i];
4081 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4082 				continue;
4083 
4084 			vsi = &vf->vsi;
4085 			ftl_len = 0, ftl_counter = 0;
4086 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4087 			SLIST_FOREACH(f, &vsi->ftl, next)
4088 				ftl_len++;
4089 
4090 			if (ftl_len < 1)
4091 				sbuf_printf(buf, "(none)\n");
4092 			else {
4093 				SLIST_FOREACH(f, &vsi->ftl, next) {
4094 					sbuf_printf(buf,
4095 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4096 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4097 				}
4098 			}
4099 		}
4100 	}
4101 #endif
4102 
4103 	error = sbuf_finish(buf);
4104 	if (error)
4105 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4106 	sbuf_delete(buf);
4107 
4108 	return (error);
4109 }
4110 
4111 #define IXL_SW_RES_SIZE 0x14
4112 int
4113 ixl_res_alloc_cmp(const void *a, const void *b)
4114 {
4115 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4116 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4117 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4118 
4119 	return ((int)one->resource_type - (int)two->resource_type);
4120 }
4121 
4122 /*
4123  * Longest string length: 25
4124  */
4125 char *
4126 ixl_switch_res_type_string(u8 type)
4127 {
4128 	// TODO: This should be changed to static const
4129 	char * ixl_switch_res_type_strings[0x14] = {
4130 		"VEB",
4131 		"VSI",
4132 		"Perfect Match MAC address",
4133 		"S-tag",
4134 		"(Reserved)",
4135 		"Multicast hash entry",
4136 		"Unicast hash entry",
4137 		"VLAN",
4138 		"VSI List entry",
4139 		"(Reserved)",
4140 		"VLAN Statistic Pool",
4141 		"Mirror Rule",
4142 		"Queue Set",
4143 		"Inner VLAN Forward filter",
4144 		"(Reserved)",
4145 		"Inner MAC",
4146 		"IP",
4147 		"GRE/VN1 Key",
4148 		"VN2 Key",
4149 		"Tunneling Port"
4150 	};
4151 
4152 	if (type < 0x14)
4153 		return ixl_switch_res_type_strings[type];
4154 	else
4155 		return "(Reserved)";
4156 }
4157 
4158 static int
4159 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4160 {
4161 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4162 	struct i40e_hw *hw = &pf->hw;
4163 	device_t dev = pf->dev;
4164 	struct sbuf *buf;
4165 	enum i40e_status_code status;
4166 	int error = 0;
4167 
4168 	u8 num_entries;
4169 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4170 
4171 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4172 	if (!buf) {
4173 		device_printf(dev, "Could not allocate sbuf for output.\n");
4174 		return (ENOMEM);
4175 	}
4176 
4177 	bzero(resp, sizeof(resp));
4178 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4179 				resp,
4180 				IXL_SW_RES_SIZE,
4181 				NULL);
4182 	if (status) {
4183 		device_printf(dev,
4184 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4185 		    __func__, i40e_stat_str(hw, status),
4186 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4187 		sbuf_delete(buf);
4188 		return (EIO);
4189 	}
4190 
4191 	/* Sort entries by type for display */
4192 	qsort(resp, num_entries,
4193 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4194 	    &ixl_res_alloc_cmp);
4195 
4196 	sbuf_cat(buf, "\n");
4197 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4198 	sbuf_printf(buf,
4199 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4200 	    "                          | (this)     | (all) | (this) | (all)       \n");
4201 	for (int i = 0; i < num_entries; i++) {
4202 		sbuf_printf(buf,
4203 		    "%25s | %10d   %5d   %6d   %12d",
4204 		    ixl_switch_res_type_string(resp[i].resource_type),
4205 		    resp[i].guaranteed,
4206 		    resp[i].total,
4207 		    resp[i].used,
4208 		    resp[i].total_unalloced);
4209 		if (i < num_entries - 1)
4210 			sbuf_cat(buf, "\n");
4211 	}
4212 
4213 	error = sbuf_finish(buf);
4214 	if (error)
4215 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4216 
4217 	sbuf_delete(buf);
4218 	return (error);
4219 }
4220 
4221 /*
4222 ** Caller must init and delete sbuf; this function will clear and
4223 ** finish it for caller.
4224 */
4225 char *
4226 ixl_switch_element_string(struct sbuf *s,
4227     struct i40e_aqc_switch_config_element_resp *element)
4228 {
4229 	sbuf_clear(s);
4230 
4231 	switch (element->element_type) {
4232 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4233 		sbuf_printf(s, "MAC %3d", element->element_info);
4234 		break;
4235 	case I40E_AQ_SW_ELEM_TYPE_PF:
4236 		sbuf_printf(s, "PF  %3d", element->element_info);
4237 		break;
4238 	case I40E_AQ_SW_ELEM_TYPE_VF:
4239 		sbuf_printf(s, "VF  %3d", element->element_info);
4240 		break;
4241 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4242 		sbuf_cat(s, "EMP");
4243 		break;
4244 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4245 		sbuf_cat(s, "BMC");
4246 		break;
4247 	case I40E_AQ_SW_ELEM_TYPE_PV:
4248 		sbuf_cat(s, "PV");
4249 		break;
4250 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4251 		sbuf_cat(s, "VEB");
4252 		break;
4253 	case I40E_AQ_SW_ELEM_TYPE_PA:
4254 		sbuf_cat(s, "PA");
4255 		break;
4256 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4257 		sbuf_printf(s, "VSI %3d", element->element_info);
4258 		break;
4259 	default:
4260 		sbuf_cat(s, "?");
4261 		break;
4262 	}
4263 
4264 	sbuf_finish(s);
4265 	return sbuf_data(s);
4266 }
4267 
4268 static int
4269 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4270 {
4271 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4272 	struct i40e_hw *hw = &pf->hw;
4273 	device_t dev = pf->dev;
4274 	struct sbuf *buf;
4275 	struct sbuf *nmbuf;
4276 	enum i40e_status_code status;
4277 	int error = 0;
4278 	u16 next = 0;
4279 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4280 
4281 	struct i40e_aqc_get_switch_config_resp *sw_config;
4282 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4283 
4284 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4285 	if (!buf) {
4286 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4287 		return (ENOMEM);
4288 	}
4289 
4290 	status = i40e_aq_get_switch_config(hw, sw_config,
4291 	    sizeof(aq_buf), &next, NULL);
4292 	if (status) {
4293 		device_printf(dev,
4294 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4295 		    __func__, i40e_stat_str(hw, status),
4296 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4297 		sbuf_delete(buf);
4298 		return (EIO);
4299 	}
4300 	if (next)
4301 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4302 		    __func__, next);
4303 
4304 	nmbuf = sbuf_new_auto();
4305 	if (!nmbuf) {
4306 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4307 		sbuf_delete(buf);
4308 		return (ENOMEM);
4309 	}
4310 
4311 	sbuf_cat(buf, "\n");
4312 	/* Assuming <= 255 elements in switch */
4313 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4314 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4315 	/* Exclude:
4316 	** Revision -- all elements are revision 1 for now
4317 	*/
4318 	sbuf_printf(buf,
4319 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4320 	    "                |          |          | (uplink)\n");
4321 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4322 		// "%4d (%8s) | %8s   %8s   %#8x",
4323 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4324 		sbuf_cat(buf, " ");
4325 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4326 		    &sw_config->element[i]));
4327 		sbuf_cat(buf, " | ");
4328 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4329 		sbuf_cat(buf, "   ");
4330 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4331 		sbuf_cat(buf, "   ");
4332 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4333 		if (i < sw_config->header.num_reported - 1)
4334 			sbuf_cat(buf, "\n");
4335 	}
4336 	sbuf_delete(nmbuf);
4337 
4338 	error = sbuf_finish(buf);
4339 	if (error)
4340 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4341 
4342 	sbuf_delete(buf);
4343 
4344 	return (error);
4345 }
4346 
4347 static int
4348 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4349 {
4350 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4351 	struct i40e_hw *hw = &pf->hw;
4352 	device_t dev = pf->dev;
4353 	struct sbuf *buf;
4354 	int error = 0;
4355 	enum i40e_status_code status;
4356 	u32 reg;
4357 
4358 	struct i40e_aqc_get_set_rss_key_data key_data;
4359 
4360 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4361 	if (!buf) {
4362 		device_printf(dev, "Could not allocate sbuf for output.\n");
4363 		return (ENOMEM);
4364 	}
4365 
4366 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4367 
4368 	sbuf_cat(buf, "\n");
4369 	if (hw->mac.type == I40E_MAC_X722) {
4370 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4371 		if (status)
4372 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4373 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4374 	} else {
4375 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4376 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4377 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4378 		}
4379 	}
4380 
4381 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4382 
4383 	error = sbuf_finish(buf);
4384 	if (error)
4385 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4386 	sbuf_delete(buf);
4387 
4388 	return (error);
4389 }
4390 
4391 static void
4392 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4393 {
4394 	int i, j, k, width;
4395 	char c;
4396 
4397 	if (length < 1 || buf == NULL) return;
4398 
4399 	int byte_stride = 16;
4400 	int lines = length / byte_stride;
4401 	int rem = length % byte_stride;
4402 	if (rem > 0)
4403 		lines++;
4404 
4405 	for (i = 0; i < lines; i++) {
4406 		width = (rem > 0 && i == lines - 1)
4407 		    ? rem : byte_stride;
4408 
4409 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4410 
4411 		for (j = 0; j < width; j++)
4412 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4413 
4414 		if (width < byte_stride) {
4415 			for (k = 0; k < (byte_stride - width); k++)
4416 				sbuf_printf(sb, "   ");
4417 		}
4418 
4419 		if (!text) {
4420 			sbuf_printf(sb, "\n");
4421 			continue;
4422 		}
4423 
4424 		for (j = 0; j < width; j++) {
4425 			c = (char)buf[i * byte_stride + j];
4426 			if (c < 32 || c > 126)
4427 				sbuf_printf(sb, ".");
4428 			else
4429 				sbuf_printf(sb, "%c", c);
4430 
4431 			if (j == width - 1)
4432 				sbuf_printf(sb, "\n");
4433 		}
4434 	}
4435 }
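
/*
 * Sample of the output produced above for length == 20, label_offset == 0
 * and text == true (the short final line is padded so the ASCII column
 * stays aligned):
 *
 *	   0 | 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f ................
 *	  16 | 10 11 12 13                                     ....
 */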
4436 
4437 static int
4438 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4439 {
4440 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4441 	struct i40e_hw *hw = &pf->hw;
4442 	device_t dev = pf->dev;
4443 	struct sbuf *buf;
4444 	int error = 0;
4445 	enum i40e_status_code status;
4446 	u8 hlut[512];
4447 	u32 reg;
4448 
4449 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4450 	if (!buf) {
4451 		device_printf(dev, "Could not allocate sbuf for output.\n");
4452 		return (ENOMEM);
4453 	}
4454 
4455 	bzero(hlut, sizeof(hlut));
4456 	sbuf_cat(buf, "\n");
4457 	if (hw->mac.type == I40E_MAC_X722) {
4458 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4459 		if (status)
4460 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4461 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4462 	} else {
4463 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4464 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4465 			bcopy(&reg, &hlut[i << 2], 4);
4466 		}
4467 	}
4468 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4469 
4470 	error = sbuf_finish(buf);
4471 	if (error)
4472 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4473 	sbuf_delete(buf);
4474 
4475 	return (error);
4476 }
4477 
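/*
 * Report the 64-bit RSS Hash Enable (HENA) bitmask, assembled here from
 * the two 32-bit I40E_PFQF_HENA registers; each set bit enables RSS
 * hashing for one packet classification type.
 */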
4478 static int
4479 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4480 {
4481 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4482 	struct i40e_hw *hw = &pf->hw;
4483 	u64 hena;
4484 
4485 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4486 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4487 
4488 	return sysctl_handle_long(oidp, NULL, hena, req);
4489 }
4490 
4491 /*
4492  * Sysctl to disable firmware's link management
4493  *
4494  * 1 - Disable link management on this port
4495  * 0 - Re-enable link management
4496  *
4497  * On normal NVMs, firmware manages link by default.
4498  */
4499 static int
4500 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4501 {
4502 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4503 	struct i40e_hw *hw = &pf->hw;
4504 	device_t dev = pf->dev;
4505 	int requested_mode = -1;
4506 	enum i40e_status_code status = 0;
4507 	int error = 0;
4508 
4509 	/* Read in new mode */
4510 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4511 	if ((error) || (req->newptr == NULL))
4512 		return (error);
4513 	/* Check for sane value */
4514 	if (requested_mode < 0 || requested_mode > 1) {
4515 		device_printf(dev, "Valid modes are 0 or 1\n");
4516 		return (EINVAL);
4517 	}
4518 
4519 	/* Set new mode */
4520 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4521 	if (status) {
4522 		device_printf(dev,
4523 		    "%s: Error setting new phy debug mode %s,"
4524 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4525 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4526 		return (EIO);
4527 	}
4528 
4529 	return (0);
4530 }
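
/*
 * Usage sketch (hypothetical node name; registered under the debug
 * sysctl tree): writing 1 sets the "disable link" PHY debug flag
 * (bit 4) via i40e_aq_set_phy_debug(), taking link control away from
 * firmware; writing 0 clears the flag and restores FW link management.
 */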
4531 
4532 /*
4533  * Read some diagnostic data from an SFP module
4534  * Bytes 96-99, 102-105 from device address 0xA2
4535  */
4536 static int
4537 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4538 {
4539 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4540 	device_t dev = pf->dev;
4541 	struct sbuf *sbuf;
4542 	int error = 0;
4543 	u8 output;
4544 
4545 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4546 	if (error) {
4547 		device_printf(dev, "Error reading from i2c\n");
4548 		return (error);
4549 	}
4550 	if (output != 0x3) {
4551 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4552 		return (EIO);
4553 	}
4554 
4555 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4556 	if (!(output & 0x60)) {
4557 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4558 		return (EIO);
4559 	}
4560 
4561 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
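	/* Mirror the allocation check used by the other sysctl handlers */
	if (sbuf == NULL) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}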
4562 
4563 	for (u8 offset = 96; offset < 100; offset++) {
4564 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4565 		sbuf_printf(sbuf, "%02X ", output);
4566 	}
4567 	for (u8 offset = 102; offset < 106; offset++) {
4568 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4569 		sbuf_printf(sbuf, "%02X ", output);
4570 	}
4571 
4572 	sbuf_finish(sbuf);
4573 	sbuf_delete(sbuf);
4574 
4575 	return (0);
4576 }
4577 
4578 /*
4579  * Sysctl to read a byte from I2C bus.
4580  *
4581  * Input: 32-bit value:
4582  * 	bits 0-7:   device address (0xA0 or 0xA2)
4583  * 	bits 8-15:  offset (0-255)
4584  *	bits 16-31: unused
4585  * Output: 8-bit value read
4586  */
4587 static int
4588 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4589 {
4590 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4591 	device_t dev = pf->dev;
4592 	int input = -1, error = 0;
4593 	u8 dev_addr, offset, output;
4594 
4595 	/* Read in I2C read parameters */
4596 	error = sysctl_handle_int(oidp, &input, 0, req);
4597 	if ((error) || (req->newptr == NULL))
4598 		return (error);
4599 	/* Validate device address */
4600 	dev_addr = input & 0xFF;
4601 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4602 		return (EINVAL);
4603 	}
4604 	offset = (input >> 8) & 0xFF;
4605 
4606 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4607 	if (error)
4608 		return (error);
4609 
4610 	device_printf(dev, "%02X\n", output);
4611 	return (0);
4612 }
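
/*
 * Example encoding of the input described above: reading offset 96 from
 * device address 0xA2 packs as (96 << 8) | 0xA2 == 0x60A2, e.g.
 * (hypothetical unit number; the node was registered under the debug
 * sysctl tree set up earlier):
 *
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 */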
4613 
4614 /*
4615  * Sysctl to write a byte to the I2C bus.
4616  *
4617  * Input: 32-bit value:
4618  * 	bits 0-7:   device address (0xA0 or 0xA2)
4619  * 	bits 8-15:  offset (0-255)
4620  *	bits 16-23: value to write
4621  *	bits 24-31: unused
4622  * Output: 8-bit value written
4623  */
4624 static int
4625 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4626 {
4627 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4628 	device_t dev = pf->dev;
4629 	int input = -1, error = 0;
4630 	u8 dev_addr, offset, value;
4631 
4632 	/* Read in I2C write parameters */
4633 	error = sysctl_handle_int(oidp, &input, 0, req);
4634 	if ((error) || (req->newptr == NULL))
4635 		return (error);
4636 	/* Validate device address */
4637 	dev_addr = input & 0xFF;
4638 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4639 		return (EINVAL);
4640 	}
4641 	offset = (input >> 8) & 0xFF;
4642 	value = (input >> 16) & 0xFF;
4643 
4644 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4645 	if (error)
4646 		return (error);
4647 
4648 	device_printf(dev, "%02X written\n", value);
4649 	return (0);
4650 }
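
/*
 * Example encoding for a write: setting the byte at offset 0x7B on device
 * address 0xA2 to 0xFF packs as (0xFF << 16) | (0x7B << 8) | 0xA2
 * == 0xFF7BA2.
 */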
4651 
4652 static int
4653 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4654     u8 bit_pos, int *is_set)
4655 {
4656 	device_t dev = pf->dev;
4657 	struct i40e_hw *hw = &pf->hw;
4658 	enum i40e_status_code status;
4659 
4660 	status = i40e_aq_get_phy_capabilities(hw,
4661 	    FALSE, FALSE, abilities, NULL);
4662 	if (status) {
4663 		device_printf(dev,
4664 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4665 		    __func__, i40e_stat_str(hw, status),
4666 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4667 		return (EIO);
4668 	}
4669 
4670 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4671 	return (0);
4672 }
4673 
4674 static int
4675 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4676     u8 bit_pos, int set)
4677 {
4678 	device_t dev = pf->dev;
4679 	struct i40e_hw *hw = &pf->hw;
4680 	struct i40e_aq_set_phy_config config;
4681 	enum i40e_status_code status;
4682 
4683 	/* Set new PHY config */
4684 	memset(&config, 0, sizeof(config));
4685 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4686 	if (set)
4687 		config.fec_config |= bit_pos;
4688 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4689 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4690 		config.phy_type = abilities->phy_type;
4691 		config.phy_type_ext = abilities->phy_type_ext;
4692 		config.link_speed = abilities->link_speed;
4693 		config.eee_capability = abilities->eee_capability;
4694 		config.eeer = abilities->eeer_val;
4695 		config.low_power_ctrl = abilities->d3_lpan;
4696 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4697 
4698 		if (status) {
4699 			device_printf(dev,
4700 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4701 			    __func__, i40e_stat_str(hw, status),
4702 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4703 			return (EIO);
4704 		}
4705 	}
4706 
4707 	return (0);
4708 }
4709 
4710 static int
4711 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4712 {
4713 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4714 	int mode, error = 0;
4715 
4716 	struct i40e_aq_get_phy_abilities_resp abilities;
4717 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4718 	if (error)
4719 		return (error);
4720 	/* Read in new mode */
4721 	error = sysctl_handle_int(oidp, &mode, 0, req);
4722 	if ((error) || (req->newptr == NULL))
4723 		return (error);
4724 
4725 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4726 }
4727 
4728 static int
4729 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4730 {
4731 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4732 	int mode, error = 0;
4733 
4734 	struct i40e_aq_get_phy_abilities_resp abilities;
4735 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4736 	if (error)
4737 		return (error);
4738 	/* Read in new mode */
4739 	error = sysctl_handle_int(oidp, &mode, 0, req);
4740 	if ((error) || (req->newptr == NULL))
4741 		return (error);
4742 
4743 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4744 }
4745 
4746 static int
4747 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4748 {
4749 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4750 	int mode, error = 0;
4751 
4752 	struct i40e_aq_get_phy_abilities_resp abilities;
4753 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4754 	if (error)
4755 		return (error);
4756 	/* Read in new mode */
4757 	error = sysctl_handle_int(oidp, &mode, 0, req);
4758 	if ((error) || (req->newptr == NULL))
4759 		return (error);
4760 
4761 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4762 }
4763 
4764 static int
4765 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4766 {
4767 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4768 	int mode, error = 0;
4769 
4770 	struct i40e_aq_get_phy_abilities_resp abilities;
4771 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4772 	if (error)
4773 		return (error);
4774 	/* Read in new mode */
4775 	error = sysctl_handle_int(oidp, &mode, 0, req);
4776 	if ((error) || (req->newptr == NULL))
4777 		return (error);
4778 
4779 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4780 }
4781 
4782 static int
4783 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4784 {
4785 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4786 	int mode, error = 0;
4787 
4788 	struct i40e_aq_get_phy_abilities_resp abilities;
4789 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4790 	if (error)
4791 		return (error);
4792 	/* Read in new mode */
4793 	error = sysctl_handle_int(oidp, &mode, 0, req);
4794 	if ((error) || (req->newptr == NULL))
4795 		return (error);
4796 
4797 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4798 }
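
/*
 * The five FEC sysctls above share one get/modify/set pattern: read the
 * current fec_cfg_curr_mod_ext_info via ixl_get_fec_config(), then set or
 * clear a single FEC bit with ixl_set_fec_config(), which only issues a
 * set_phy_config AQ command when the value actually changes. E.g. writing
 * 1 to the auto handler sets I40E_AQ_SET_FEC_AUTO in the PHY config.
 */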
4799 
4800 static int
4801 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4802 {
4803 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4804 	struct i40e_hw *hw = &pf->hw;
4805 	device_t dev = pf->dev;
4806 	struct sbuf *buf;
4807 	int error = 0;
4808 	enum i40e_status_code status;
4809 
4810 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4811 	if (!buf) {
4812 		device_printf(dev, "Could not allocate sbuf for output.\n");
4813 		return (ENOMEM);
4814 	}
4815 
4816 	u8 *final_buff;
4817 	/* This amount is only necessary if reading the entire cluster into memory */
4818 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4819 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4820 	if (final_buff == NULL) {
4821 		device_printf(dev, "Could not allocate memory for output.\n");
4822 		goto out;
4823 	}
4824 	int final_buff_len = 0;
4825 
4826 	u8 cluster_id = 1;
4827 	bool more = true;
4828 
4829 	u8 dump_buf[4096];
4830 	u16 curr_buff_size = 4096;
4831 	u8 curr_next_table = 0;
4832 	u32 curr_next_index = 0;
4833 
4834 	u16 ret_buff_size;
4835 	u8 ret_next_table;
4836 	u32 ret_next_index;
4837 
4838 	sbuf_cat(buf, "\n");
4839 
4840 	while (more) {
4841 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4842 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4843 		if (status) {
4844 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4845 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4846 			goto free_out;
4847 		}
4848 
4849 		/* copy info out of temp buffer */
4850 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4851 		final_buff_len += ret_buff_size;
4852 
4853 		if (ret_next_table != curr_next_table) {
4854 			/* We're done with the current table; we can dump out read data. */
4855 			sbuf_printf(buf, "%d:", curr_next_table);
4856 			int bytes_printed = 0;
4857 			while (bytes_printed < final_buff_len) {
4858 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4859 				bytes_printed += 16;
4860 			}
4861 			sbuf_cat(buf, "\n");
4862 
4863 			/* The entire cluster has been read; we're finished */
4864 			if (ret_next_table == 0xFF)
4865 				break;
4866 
4867 			/* Otherwise clear the output buffer and continue reading */
4868 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4869 			final_buff_len = 0;
4870 		}
4871 
4872 		if (ret_next_index == 0xFFFFFFFF)
4873 			ret_next_index = 0;
4874 
4875 		bzero(dump_buf, sizeof(dump_buf));
4876 		curr_next_table = ret_next_table;
4877 		curr_next_index = ret_next_index;
4878 	}
4879 
4880 free_out:
4881 	free(final_buff, M_DEVBUF);
4882 out:
4883 	error = sbuf_finish(buf);
4884 	if (error)
4885 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4886 	sbuf_delete(buf);
4887 
4888 	return (error);
4889 }
4890 
4891 static int
4892 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4893 {
4894 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4895 	struct i40e_hw *hw = &pf->hw;
4896 	device_t dev = pf->dev;
4897 	int error = 0;
4898 	int state, new_state;
4899 	enum i40e_status_code status;
4900 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4901 
4902 	/* Read in new mode */
4903 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4904 	if ((error) || (req->newptr == NULL))
4905 		return (error);
4906 
4907 	/* Already in requested state */
4908 	if (new_state == state)
4909 		return (error);
4910 
4911 	if (new_state == 0) {
4912 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4913 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4914 			return (EINVAL);
4915 		}
4916 
4917 		if (pf->hw.aq.api_maj_ver < 1 ||
4918 		    (pf->hw.aq.api_maj_ver == 1 &&
4919 		    pf->hw.aq.api_min_ver < 7)) {
4920 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4921 			return (EINVAL);
4922 		}
4923 
4924 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4925 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4926 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4927 	} else {
4928 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4929 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4930 			device_printf(dev, "FW LLDP agent is already running\n");
4931 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4932 	}
4933 
4934 	return (0);
4935 }
4936 
4937 /*
4938  * Get FW LLDP Agent status
4939  */
4940 int
4941 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4942 {
4943 	enum i40e_status_code ret = I40E_SUCCESS;
4944 	struct i40e_lldp_variables lldp_cfg;
4945 	struct i40e_hw *hw = &pf->hw;
4946 	u8 adminstatus = 0;
4947 
4948 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4949 	if (ret)
4950 		return ret;
4951 
4952 	/* Get the LLDP AdminStatus for the current port */
4953 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4954 	adminstatus &= 0xf;
4955 
4956 	/* Check if LLDP agent is disabled */
4957 	if (!adminstatus) {
4958 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4959 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4960 	} else
4961 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4962 
4963 	return (0);
4964 }
4965 
4966 int
4967 ixl_attach_get_link_status(struct ixl_pf *pf)
4968 {
4969 	struct i40e_hw *hw = &pf->hw;
4970 	device_t dev = pf->dev;
4971 	int error = 0;
4972 
4973 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4974 	    (hw->aq.fw_maj_ver < 4)) {
4975 		i40e_msec_delay(75);
4976 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4977 		if (error) {
4978 			device_printf(dev, "link restart failed, aq_err=%d\n",
4979 			    pf->hw.aq.asq_last_status);
4980 			return (error);
4981 		}
4982 	}
4983 
4984 	/* Determine link state */
4985 	hw->phy.get_link_info = TRUE;
4986 	i40e_get_link_status(hw, &pf->link_up);
4987 	return (0);
4988 }
4989 
4990 static int
4991 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4992 {
4993 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4994 	int requested = 0, error = 0;
4995 
4996 	/* Read in new mode */
4997 	error = sysctl_handle_int(oidp, &requested, 0, req);
4998 	if ((error) || (req->newptr == NULL))
4999 		return (error);
5000 
5001 	/* Initiate the PF reset later in the admin task */
5002 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
5003 
5004 	return (error);
5005 }
5006 
5007 static int
5008 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
5009 {
5010 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5011 	struct i40e_hw *hw = &pf->hw;
5012 	int requested = 0, error = 0;
5013 
5014 	/* Read in new mode */
5015 	error = sysctl_handle_int(oidp, &requested, 0, req);
5016 	if ((error) || (req->newptr == NULL))
5017 		return (error);
5018 
5019 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
5020 
5021 	return (error);
5022 }
5023 
5024 static int
5025 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
5026 {
5027 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5028 	struct i40e_hw *hw = &pf->hw;
5029 	int requested = 0, error = 0;
5030 
5031 	/* Read in new mode */
5032 	error = sysctl_handle_int(oidp, &requested, 0, req);
5033 	if ((error) || (req->newptr == NULL))
5034 		return (error);
5035 
5036 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
5037 
5038 	return (error);
5039 }
5040 
5041 static int
5042 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5043 {
5044 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5045 	struct i40e_hw *hw = &pf->hw;
5046 	int requested = 0, error = 0;
5047 
5048 	/* Read in new mode */
5049 	error = sysctl_handle_int(oidp, &requested, 0, req);
5050 	if ((error) || (req->newptr == NULL))
5051 		return (error);
5052 
5053 	/* TODO: Find out how to bypass this */
5054 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5055 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5056 		error = EINVAL;
5057 	} else
5058 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5059 
5060 	return (error);
5061 }
5062 
5063 /*
5064  * Print out the mapping of TX and RX queue indexes
5065  * to MSI-X vectors.
5066  */
5067 static int
5068 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5069 {
5070 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5071 	struct ixl_vsi *vsi = &pf->vsi;
5072 	device_t dev = pf->dev;
5073 	struct sbuf *buf;
5074 	int error = 0;
5075 
5076 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5077 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5078 
5079 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5080 	if (!buf) {
5081 		device_printf(dev, "Could not allocate sbuf for output.\n");
5082 		return (ENOMEM);
5083 	}
5084 
5085 	sbuf_cat(buf, "\n");
5086 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5087 		rx_que = &vsi->rx_queues[i];
5088 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5089 	}
5090 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5091 		tx_que = &vsi->tx_queues[i];
5092 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5093 	}
5094 
5095 	error = sbuf_finish(buf);
5096 	if (error)
5097 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5098 	sbuf_delete(buf);
5099 
5100 	return (error);
5101 }
5102