xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision d3d381b2b194b4d24853e92eecef55f262688d1a)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static void	ixl_del_default_hw_filters(struct ixl_vsi *);
50 
51 /* Sysctls */
52 static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
53 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
54 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
55 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
56 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
59 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
60 
61 /* Debug Sysctls */
62 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
63 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
64 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
65 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
66 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
67 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
68 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
85 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 #ifdef IXL_DEBUG
87 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
88 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
89 #endif
90 
91 #ifdef IXL_IW
92 extern int ixl_enable_iwarp;
93 extern int ixl_limit_iwarp_msix;
94 #endif
95 
96 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
97     {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 
99 const char * const ixl_fc_string[6] = {
100 	"None",
101 	"Rx",
102 	"Tx",
103 	"Full",
104 	"Priority",
105 	"Default"
106 };
107 
static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};
113 
114 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
115 
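/*
 * Conditional debug printf: output is suppressed unless one of the
 * bits in 'mask' is also set in pf->dbg_mask. Callers in this file
 * typically go through ixl_dbg(), which is presumably a thin wrapper
 * around this function.
 */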
116 void
117 ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
118 {
119 	va_list args;
120 
121 	if (!(mask & pf->dbg_mask))
122 		return;
123 
	/* Re-implement device_printf() so a va_list can be passed through */
125 	device_print_prettyname(pf->dev);
126 	va_start(args, fmt);
127 	vprintf(fmt, args);
128 	va_end(args);
129 }
130 
131 /*
132 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
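** e.g. "fw 6.0.48442 api 1.7 nvm 6.01 etid 80002d79 oem 1.262.0"
** (illustrative values only; the format matches the sbuf_printf() below)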
133 */
134 void
135 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
136 {
137 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
138 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
139 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
140 
141 	sbuf_printf(buf,
142 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
143 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
144 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
145 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
146 	    IXL_NVM_VERSION_HI_SHIFT,
147 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
148 	    IXL_NVM_VERSION_LO_SHIFT,
149 	    hw->nvm.eetrack,
150 	    oem_ver, oem_build, oem_patch);
151 }
152 
153 void
154 ixl_print_nvm_version(struct ixl_pf *pf)
155 {
156 	struct i40e_hw *hw = &pf->hw;
157 	device_t dev = pf->dev;
158 	struct sbuf *sbuf;
159 
160 	sbuf = sbuf_new_auto();
161 	ixl_nvm_version_str(hw, sbuf);
162 	sbuf_finish(sbuf);
163 	device_printf(dev, "%s\n", sbuf_data(sbuf));
164 	sbuf_delete(sbuf);
165 }
166 
167 static void
168 ixl_configure_tx_itr(struct ixl_pf *pf)
169 {
170 	struct i40e_hw		*hw = &pf->hw;
171 	struct ixl_vsi		*vsi = &pf->vsi;
172 	struct ixl_tx_queue	*que = vsi->tx_queues;
173 
174 	vsi->tx_itr_setting = pf->tx_itr;
175 
176 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
177 		struct tx_ring	*txr = &que->txr;
178 
179 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
180 		    vsi->tx_itr_setting);
181 		txr->itr = vsi->tx_itr_setting;
182 		txr->latency = IXL_AVE_LATENCY;
183 	}
184 }
185 
186 static void
187 ixl_configure_rx_itr(struct ixl_pf *pf)
188 {
189 	struct i40e_hw		*hw = &pf->hw;
190 	struct ixl_vsi		*vsi = &pf->vsi;
191 	struct ixl_rx_queue	*que = vsi->rx_queues;
192 
193 	vsi->rx_itr_setting = pf->rx_itr;
194 
195 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
196 		struct rx_ring 	*rxr = &que->rxr;
197 
198 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
199 		    vsi->rx_itr_setting);
200 		rxr->itr = vsi->rx_itr_setting;
201 		rxr->latency = IXL_AVE_LATENCY;
202 	}
203 }
204 
205 /*
206  * Write PF ITR values to queue ITR registers.
207  */
208 void
209 ixl_configure_itr(struct ixl_pf *pf)
210 {
211 	ixl_configure_tx_itr(pf);
212 	ixl_configure_rx_itr(pf);
213 }
214 
215 /*********************************************************************
216  *
217  *  Get the hardware capabilities
218  *
219  **********************************************************************/
220 
221 int
222 ixl_get_hw_capabilities(struct ixl_pf *pf)
223 {
224 	struct i40e_aqc_list_capabilities_element_resp *buf;
225 	struct i40e_hw	*hw = &pf->hw;
226 	device_t 	dev = pf->dev;
227 	enum i40e_status_code status;
228 	int len, i2c_intfc_num;
229 	bool again = TRUE;
230 	u16 needed;
231 
232 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
233 retry:
234 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
235 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
236 		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
243 	free(buf, M_DEVBUF);
244 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
245 	    (again == TRUE)) {
246 		/* retry once with a larger buffer */
247 		again = FALSE;
248 		len = needed;
249 		goto retry;
250 	} else if (status != I40E_SUCCESS) {
251 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
252 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
253 		return (ENODEV);
254 	}
255 
256 	/*
257 	 * Some devices have both MDIO and I2C; since this isn't reported
258 	 * by the FW, check registers to see if an I2C interface exists.
259 	 */
260 	i2c_intfc_num = ixl_find_i2c_interface(pf);
261 	if (i2c_intfc_num != -1)
262 		pf->has_i2c = true;
263 
264 	/* Determine functions to use for driver I2C accesses */
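	/*
	 * Summary of the cases below:
	 *   0 - auto-select: AQ method on XL710 with FW API version 1.7+,
	 *       register access method otherwise
	 *   1 - bit-bang method
	 *   2 - register access method
	 *   3 - Admin Queue command method
	 */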
265 	switch (pf->i2c_access_method) {
266 	case 0: {
267 		if (hw->mac.type == I40E_MAC_XL710 &&
268 		    hw->aq.api_maj_ver == 1 &&
269 		    hw->aq.api_min_ver >= 7) {
270 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
271 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
272 		} else {
273 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
274 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
275 		}
276 		break;
277 	}
278 	case 3:
279 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
280 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
281 		break;
282 	case 2:
283 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
284 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
285 		break;
286 	case 1:
287 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
288 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
289 		break;
290 	default:
291 		/* Should not happen */
292 		device_printf(dev, "Error setting I2C access functions\n");
293 		break;
294 	}
295 
296 	/* Print a subset of the capability information. */
297 	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
298 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
299 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
300 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
301 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
302 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
303 	    "MDIO shared");
304 
305 	return (0);
306 }
307 
308 /* For the set_advertise sysctl */
309 void
310 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
311 {
312 	device_t dev = pf->dev;
313 	int err;
314 
315 	/* Make sure to initialize the device to the complete list of
316 	 * supported speeds on driver load, to ensure unloading and
317 	 * reloading the driver will restore this value.
318 	 */
319 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
320 	if (err) {
321 		/* Non-fatal error */
322 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
323 			      __func__, err);
324 		return;
325 	}
326 
327 	pf->advertised_speed =
328 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
329 }
330 
331 int
332 ixl_teardown_hw_structs(struct ixl_pf *pf)
333 {
334 	enum i40e_status_code status = 0;
335 	struct i40e_hw *hw = &pf->hw;
336 	device_t dev = pf->dev;
337 
338 	/* Shutdown LAN HMC */
339 	if (hw->hmc.hmc_obj) {
340 		status = i40e_shutdown_lan_hmc(hw);
341 		if (status) {
342 			device_printf(dev,
343 			    "init: LAN HMC shutdown failure; status %s\n",
344 			    i40e_stat_str(hw, status));
345 			goto err_out;
346 		}
347 	}
348 
349 	/* Shutdown admin queue */
350 	ixl_disable_intr0(hw);
351 	status = i40e_shutdown_adminq(hw);
352 	if (status)
353 		device_printf(dev,
354 		    "init: Admin Queue shutdown failure; status %s\n",
355 		    i40e_stat_str(hw, status));
356 
357 err_out:
358 	return (status);
359 }
360 
361 int
362 ixl_reset(struct ixl_pf *pf)
363 {
364 	struct i40e_hw *hw = &pf->hw;
365 	device_t dev = pf->dev;
366 	u32 reg;
367 	int error = 0;
368 
369 	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
370 	i40e_clear_hw(hw);
371 	error = i40e_pf_reset(hw);
372 	if (error) {
373 		device_printf(dev, "init: PF reset failure\n");
374 		error = EIO;
375 		goto err_out;
376 	}
377 
378 	error = i40e_init_adminq(hw);
379 	if (error) {
380 		device_printf(dev, "init: Admin queue init failure;"
381 		    " status code %d\n", error);
382 		error = EIO;
383 		goto err_out;
384 	}
385 
386 	i40e_clear_pxe_mode(hw);
387 
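	/*
	 * NOTE: The remainder of the original rebuild sequence is disabled
	 * with #if 0 below; ixl_rebuild_hw_structs_after_reset() after the
	 * #endif performs the rebuild instead, so the goto err_out
	 * statements above jump to the live label at the end of this
	 * function.
	 */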
388 #if 0
389 	error = ixl_get_hw_capabilities(pf);
390 	if (error) {
391 		device_printf(dev, "init: Error retrieving HW capabilities;"
392 		    " status code %d\n", error);
393 		goto err_out;
394 	}
395 
396 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
397 	    hw->func_caps.num_rx_qp, 0, 0);
398 	if (error) {
399 		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
400 		    error);
401 		error = EIO;
402 		goto err_out;
403 	}
404 
405 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
406 	if (error) {
407 		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
408 		    error);
409 		error = EIO;
410 		goto err_out;
411 	}
412 
413 	// XXX: possible fix for panic, but our failure recovery is still broken
414 	error = ixl_switch_config(pf);
415 	if (error) {
416 		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
417 		     error);
418 		goto err_out;
419 	}
420 
421 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
422 	    NULL);
423         if (error) {
424 		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
425 		    " aq_err %d\n", error, hw->aq.asq_last_status);
426 		error = EIO;
427 		goto err_out;
428 	}
429 
430 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
431 	if (error) {
432 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
433 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
434 		goto err_out;
435 	}
436 
437 	// XXX: (Rebuild VSIs?)
438 
439 	/* Firmware delay workaround */
440 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
441 	    (hw->aq.fw_maj_ver < 4)) {
442 		i40e_msec_delay(75);
443 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
444 		if (error) {
445 			device_printf(dev, "init: link restart failed, aq_err %d\n",
446 			    hw->aq.asq_last_status);
447 			goto err_out;
448 		}
449 	}
450 
451 
452 	/* Re-enable admin queue interrupt */
453 	if (pf->msix > 1) {
454 		ixl_configure_intr0_msix(pf);
455 		ixl_enable_intr0(hw);
456 	}
457 
458 err_out:
459 	return (error);
460 #endif
461 	// TODO: Fix second parameter
462 	ixl_rebuild_hw_structs_after_reset(pf, false);
463 
464 	/* The PF reset should have cleared any critical errors */
465 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
466 	atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
467 
468 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
469 	reg |= IXL_ICR0_CRIT_ERR_MASK;
470 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
471 
err_out:
	return (error);
474 }
475 
476 /*
477  * TODO: Make sure this properly handles admin queue / single rx queue intr
478  */
479 int
480 ixl_intr(void *arg)
481 {
482 	struct ixl_pf		*pf = arg;
483 	struct i40e_hw		*hw =  &pf->hw;
484 	struct ixl_vsi		*vsi = &pf->vsi;
485 	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;
487 
488 	// pf->admin_irq++
489 	++que->irqs;
490 
491 // TODO: Check against proper field
492 #if 0
493 	/* Clear PBA at start of ISR if using legacy interrupts */
494 	if (pf->msix == 0)
495 		wr32(hw, I40E_PFINT_DYN_CTL0,
496 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
497 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
498 #endif
499 
500 	icr0 = rd32(hw, I40E_PFINT_ICR0);
501 
503 #ifdef PCI_IOV
504 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
505 		iflib_iov_intr_deferred(vsi->ctx);
506 #endif
507 
508 	// TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
509 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
510 		iflib_admin_intr_deferred(vsi->ctx);
511 
512 	// TODO: Is intr0 enabled somewhere else?
513 	ixl_enable_intr0(hw);
514 
515 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
516 		return (FILTER_SCHEDULE_THREAD);
517 	else
518 		return (FILTER_HANDLED);
519 }
520 
521 
522 /*********************************************************************
523  *
524  *  MSIX VSI Interrupt Service routine
525  *
526  **********************************************************************/
527 int
528 ixl_msix_que(void *arg)
529 {
530 	struct ixl_rx_queue *que = arg;
531 
532 	++que->irqs;
533 
534 	ixl_set_queue_rx_itr(que);
535 	// ixl_set_queue_tx_itr(que);
536 
537 	return (FILTER_SCHEDULE_THREAD);
538 }
539 
540 
541 /*********************************************************************
542  *
543  *  MSIX Admin Queue Interrupt Service routine
544  *
545  **********************************************************************/
546 int
547 ixl_msix_adminq(void *arg)
548 {
549 	struct ixl_pf	*pf = arg;
550 	struct i40e_hw	*hw = &pf->hw;
551 	device_t	dev = pf->dev;
552 	u32		reg, mask, rstat_reg;
553 	bool		do_task = FALSE;
554 
555 	DDPRINTF(dev, "begin");
556 
557 	++pf->admin_irq;
558 
559 	reg = rd32(hw, I40E_PFINT_ICR0);
560 	// For masking off interrupt causes that need to be handled before
561 	// they can be re-enabled
562 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
563 
564 	/* Check on the cause */
565 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
566 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
567 		do_task = TRUE;
568 	}
569 
570 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
571 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
572 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
573 		do_task = TRUE;
574 	}
575 
576 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
577 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
578 		device_printf(dev, "Reset Requested!\n");
579 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
580 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
581 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
582 		device_printf(dev, "Reset type: ");
583 		switch (rstat_reg) {
584 		/* These others might be handled similarly to an EMPR reset */
585 		case I40E_RESET_CORER:
586 			printf("CORER\n");
587 			break;
588 		case I40E_RESET_GLOBR:
589 			printf("GLOBR\n");
590 			break;
591 		case I40E_RESET_EMPR:
592 			printf("EMPR\n");
593 			break;
594 		default:
595 			printf("POR\n");
596 			break;
597 		}
598 		/* overload admin queue task to check reset progress */
599 		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
600 		do_task = TRUE;
601 	}
602 
603 	/*
604 	 * PE / PCI / ECC exceptions are all handled in the same way:
605 	 * mask out these three causes, then request a PF reset
606 	 *
607 	 * TODO: I think at least ECC error requires a GLOBR, not PFR
608 	 */
609 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
611 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
612 		device_printf(dev, "PCI Exception detected!\n");
613 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
614 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
615 	/* Checks against the conditions above */
616 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
617 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
618 		atomic_set_32(&pf->state,
619 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
620 		do_task = TRUE;
621 	}
622 
623 	// TODO: Linux driver never re-enables this interrupt once it has been detected
624 	// Then what is supposed to happen? A PF reset? Should it never happen?
625 	// TODO: Parse out this error into something human readable
626 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
627 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
628 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
629 			device_printf(dev, "HMC Error detected!\n");
630 			device_printf(dev, "INFO 0x%08x\n", reg);
631 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
632 			device_printf(dev, "DATA 0x%08x\n", reg);
633 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
634 		}
635 	}
636 
637 #ifdef PCI_IOV
638 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
639 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
640 		atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
641 		do_task = TRUE;
642 	}
643 #endif
644 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
645 
646 	if (do_task)
647 		return (FILTER_SCHEDULE_THREAD);
648 	else
649 		return (FILTER_HANDLED);
650 }
651 
652 /*********************************************************************
653  * 	Filter Routines
654  *
655  *	Routines for multicast and vlan filter management.
656  *
657  *********************************************************************/
658 void
659 ixl_add_multi(struct ixl_vsi *vsi)
660 {
661 	struct	ifmultiaddr	*ifma;
662 	struct ifnet		*ifp = vsi->ifp;
663 	struct i40e_hw		*hw = vsi->hw;
664 	int			mcnt = 0, flags;
665 
666 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
667 
668 	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide whether
	** we should simply use multicast promiscuous.
	*/
673 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
674 		if (ifma->ifma_addr->sa_family != AF_LINK)
675 			continue;
676 		mcnt++;
677 	}
678 	if_maddr_runlock(ifp);
679 
680 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
681 		/* delete existing MC filters */
682 		ixl_del_hw_filters(vsi, mcnt);
683 		i40e_aq_set_vsi_multicast_promiscuous(hw,
684 		    vsi->seid, TRUE, NULL);
685 		return;
686 	}
687 
688 	mcnt = 0;
689 	if_maddr_rlock(ifp);
690 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
691 		if (ifma->ifma_addr->sa_family != AF_LINK)
692 			continue;
693 		ixl_add_mc_filter(vsi,
694 		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
695 		mcnt++;
696 	}
697 	if_maddr_runlock(ifp);
698 	if (mcnt > 0) {
699 		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
700 		ixl_add_hw_filters(vsi, flags, mcnt);
701 	}
702 
703 	IOCTL_DEBUGOUT("ixl_add_multi: end");
704 }
705 
706 void
707 ixl_del_multi(struct ixl_vsi *vsi)
708 {
709 	struct ifnet		*ifp = vsi->ifp;
710 	struct ifmultiaddr	*ifma;
711 	struct ixl_mac_filter	*f;
712 	int			mcnt = 0;
713 	bool		match = FALSE;
714 
715 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
716 
717 	/* Search for removed multicast addresses */
718 	if_maddr_rlock(ifp);
719 	SLIST_FOREACH(f, &vsi->ftl, next) {
720 		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
721 			match = FALSE;
722 			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
723 				if (ifma->ifma_addr->sa_family != AF_LINK)
724 					continue;
725 				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
726 				if (cmp_etheraddr(f->macaddr, mc_addr)) {
727 					match = TRUE;
728 					break;
729 				}
730 			}
731 			if (match == FALSE) {
732 				f->flags |= IXL_FILTER_DEL;
733 				mcnt++;
734 			}
735 		}
736 	}
737 	if_maddr_runlock(ifp);
738 
739 	if (mcnt > 0)
740 		ixl_del_hw_filters(vsi, mcnt);
741 }
742 
743 void
744 ixl_link_up_msg(struct ixl_pf *pf)
745 {
746 	struct i40e_hw *hw = &pf->hw;
747 	struct ifnet *ifp = pf->vsi.ifp;
748 	char *req_fec_string, *neg_fec_string;
749 	u8 fec_abilities;
750 
751 	fec_abilities = hw->phy.link_info.req_fec_info;
752 	/* If both RS and KR are requested, only show RS */
753 	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
754 		req_fec_string = ixl_fec_string[0];
755 	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
756 		req_fec_string = ixl_fec_string[1];
757 	else
758 		req_fec_string = ixl_fec_string[2];
759 
760 	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
761 		neg_fec_string = ixl_fec_string[0];
762 	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
763 		neg_fec_string = ixl_fec_string[1];
764 	else
765 		neg_fec_string = ixl_fec_string[2];
766 
767 	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
768 	    ifp->if_xname,
769 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
770 	    req_fec_string, neg_fec_string,
771 	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
772 	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
773 	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
774 		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
775 		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
776 		ixl_fc_string[1] : ixl_fc_string[0]);
777 }
778 
779 /*
780  * Configure admin queue/misc interrupt cause registers in hardware.
781  */
782 void
783 ixl_configure_intr0_msix(struct ixl_pf *pf)
784 {
785 	struct i40e_hw *hw = &pf->hw;
786 	u32 reg;
787 
788 	/* First set up the adminq - vector 0 */
789 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
790 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
791 
792 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
793 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
794 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
795 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
796 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
797 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
798 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
799 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
800 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
801 
802 	/*
803 	 * 0x7FF is the end of the queue list.
804 	 * This means we won't use MSI-X vector 0 for a queue interrupt
805 	 * in MSIX mode.
806 	 */
807 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
808 	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
809 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
810 
811 	wr32(hw, I40E_PFINT_DYN_CTL0,
812 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
813 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
814 
815 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
816 }
817 
818 /*
819  * Configure queue interrupt cause registers in hardware.
820  *
821  * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
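 *
 * For example, MSI-X vector 1 programs LNKLSTN register 0 to point at
 * RX queue 0; RQCTL(0) then chains to TX queue 0, and TQCTL(0)
 * terminates the list with IXL_QUEUE_EOL.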
822  */
823 void
824 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
825 {
826 	struct i40e_hw *hw = &pf->hw;
827 	struct ixl_vsi *vsi = &pf->vsi;
828 	u32		reg;
829 	u16		vector = 1;
830 
831 	// TODO: See if max is really necessary
832 	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
833 		/* Make sure interrupt is disabled */
834 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
835 		/* Set linked list head to point to corresponding RX queue
836 		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
837 		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
838 		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
839 		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
840 		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
841 		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
842 
843 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
844 		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
845 		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
846 		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
847 		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
848 		wr32(hw, I40E_QINT_RQCTL(i), reg);
849 
850 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
851 		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
852 		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
853 		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
854 		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
855 		wr32(hw, I40E_QINT_TQCTL(i), reg);
856 	}
857 }
858 
859 /*
860  * Configure for single interrupt vector operation
861  */
862 void
863 ixl_configure_legacy(struct ixl_pf *pf)
864 {
865 	struct i40e_hw	*hw = &pf->hw;
866 	struct ixl_vsi	*vsi = &pf->vsi;
867 	u32 reg;
868 
869 // TODO: Fix
870 #if 0
871 	/* Configure ITR */
872 	vsi->tx_itr_setting = pf->tx_itr;
873 	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
874 	    vsi->tx_itr_setting);
875 	txr->itr = vsi->tx_itr_setting;
876 
877 	vsi->rx_itr_setting = pf->rx_itr;
878 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
879 	    vsi->rx_itr_setting);
880 	rxr->itr = vsi->rx_itr_setting;
881 	/* XXX: Assuming only 1 queue in single interrupt mode */
882 #endif
883 	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
884 
885 	/* Setup "other" causes */
886 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
887 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
888 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
889 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
890 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
891 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
892 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
893 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
894 	    ;
895 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
896 
897 	/* No ITR for non-queue interrupts */
898 	wr32(hw, I40E_PFINT_STAT_CTL0,
899 	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
900 
901 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
902 	wr32(hw, I40E_PFINT_LNKLST0, 0);
903 
904 	/* Associate the queue pair to the vector and enable the q int */
905 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
906 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
907 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
908 	wr32(hw, I40E_QINT_RQCTL(0), reg);
909 
910 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
911 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
912 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
913 	wr32(hw, I40E_QINT_TQCTL(0), reg);
914 }
915 
916 void
917 ixl_free_pci_resources(struct ixl_pf *pf)
918 {
919 	struct ixl_vsi		*vsi = &pf->vsi;
920 	device_t		dev = iflib_get_dev(vsi->ctx);
921 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
922 
923 	/* We may get here before stations are setup */
924 	if (rx_que == NULL)
925 		goto early;
926 
927 	/*
928 	**  Release all msix VSI resources:
929 	*/
930 	iflib_irq_free(vsi->ctx, &vsi->irq);
931 
932 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
933 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
934 early:
935 	if (pf->pci_mem != NULL)
936 		bus_release_resource(dev, SYS_RES_MEMORY,
937 		    PCIR_BAR(0), pf->pci_mem);
938 }
939 
940 void
941 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
942 {
943 	/* Display supported media types */
944 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
945 		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
946 
947 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
948 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
949 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
950 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
951 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
952 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
953 
954 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
955 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
956 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
957 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
958 
959 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
960 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
961 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
962 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
963 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
964 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
965 
966 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
967 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
968 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
969 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
970 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
971 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
972 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
973 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
974 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
975 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
976 
977 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
978 		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
979 
980 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
981 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
982 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
983 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
984 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
985 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
986 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
987 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
988 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
989 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
990 		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
991 
992 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
993 		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
994 
995 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
996 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
997 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
998 		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
999 
1000 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1001 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1002 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1003 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1004 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1005 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1006 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1007 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
1008 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
1009 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1010 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1011 		ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1012 }
1013 
1014 /*********************************************************************
1015  *
1016  *  Setup networking device structure and register an interface.
1017  *
1018  **********************************************************************/
1019 int
1020 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1021 {
1022 	struct ixl_vsi *vsi = &pf->vsi;
1023 	if_ctx_t ctx = vsi->ctx;
1024 	struct i40e_hw *hw = &pf->hw;
1025 	struct ifnet *ifp = iflib_get_ifp(ctx);
1026 	struct i40e_aq_get_phy_abilities_resp abilities;
1027 	enum i40e_status_code aq_error = 0;
1028 
1029 	INIT_DBG_DEV(dev, "begin");
1030 
1031 	/* TODO: Remove VLAN_ENCAP_LEN? */
1032 	vsi->shared->isc_max_frame_size =
1033 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1034 	    + ETHER_VLAN_ENCAP_LEN;
1035 
1036 	aq_error = i40e_aq_get_phy_capabilities(hw,
1037 	    FALSE, TRUE, &abilities, NULL);
1038 	/* May need delay to detect fiber correctly */
1039 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1040 		/* TODO: Maybe just retry this in a task... */
1041 		i40e_msec_delay(200);
1042 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1043 		    TRUE, &abilities, NULL);
1044 	}
1045 	if (aq_error) {
1046 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
1047 			device_printf(dev, "Unknown PHY type detected!\n");
1048 		else
1049 			device_printf(dev,
1050 			    "Error getting supported media types, err %d,"
1051 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1052 	} else {
1053 		pf->supported_speeds = abilities.link_speed;
1054 #if __FreeBSD_version >= 1100000
1055 		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1056 #else
1057 		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1058 #endif
1059 
1060 		ixl_add_ifmedia(vsi, hw->phy.phy_types);
1061 	}
1062 
1063 	/* Use autoselect media by default */
1064 	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1065 	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1066 
1067 	return (0);
1068 }
1069 
1070 /*
1071 ** Run when the Admin Queue gets a link state change interrupt.
1072 */
1073 void
1074 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1075 {
1076 	struct i40e_hw *hw = &pf->hw;
1077 	device_t dev = iflib_get_dev(pf->vsi.ctx);
1078 	struct i40e_aqc_get_link_status *status =
1079 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1080 
1081 	/* Request link status from adapter */
1082 	hw->phy.get_link_info = TRUE;
1083 	i40e_get_link_status(hw, &pf->link_up);
1084 
1085 	/* Print out message if an unqualified module is found */
1086 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1087 	    (pf->advertised_speed) &&
1088 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1089 	    (!(status->link_info & I40E_AQ_LINK_UP)))
1090 		device_printf(dev, "Link failed because "
1091 		    "an unqualified module was detected!\n");
1092 
1093 	/* OS link info is updated elsewhere */
1094 }
1095 
1096 /*********************************************************************
1097  *
1098  *  Get Firmware Switch configuration
1099  *	- this will need to be more robust when more complex
1100  *	  switch configurations are enabled.
1101  *
1102  **********************************************************************/
1103 int
1104 ixl_switch_config(struct ixl_pf *pf)
1105 {
1106 	struct i40e_hw	*hw = &pf->hw;
1107 	struct ixl_vsi	*vsi = &pf->vsi;
1108 	device_t 	dev = iflib_get_dev(vsi->ctx);
1109 	struct i40e_aqc_get_switch_config_resp *sw_config;
1110 	u8	aq_buf[I40E_AQ_LARGE_BUF];
1111 	int	ret;
1112 	u16	next = 0;
1113 
1114 	memset(&aq_buf, 0, sizeof(aq_buf));
1115 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1116 	ret = i40e_aq_get_switch_config(hw, sw_config,
1117 	    sizeof(aq_buf), &next, NULL);
1118 	if (ret) {
1119 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
1120 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1121 		return (ret);
1122 	}
1123 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1124 		device_printf(dev,
1125 		    "Switch config: header reported: %d in structure, %d total\n",
1126 		    sw_config->header.num_reported, sw_config->header.num_total);
1127 		for (int i = 0; i < sw_config->header.num_reported; i++) {
1128 			device_printf(dev,
1129 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1130 			    sw_config->element[i].element_type,
1131 			    sw_config->element[i].seid,
1132 			    sw_config->element[i].uplink_seid,
1133 			    sw_config->element[i].downlink_seid);
1134 		}
1135 	}
1136 	/* Simplified due to a single VSI */
1137 	vsi->uplink_seid = sw_config->element[0].uplink_seid;
1138 	vsi->downlink_seid = sw_config->element[0].downlink_seid;
1139 	vsi->seid = sw_config->element[0].seid;
1140 	return (ret);
1141 }
1142 
1143 /*********************************************************************
1144  *
1145  *  Initialize the VSI:  this handles contexts, which means things
1146  *  			 like the number of descriptors, buffer size,
1147  *			 plus we init the rings thru this function.
1148  *
1149  **********************************************************************/
1150 int
1151 ixl_initialize_vsi(struct ixl_vsi *vsi)
1152 {
1153 	struct ixl_pf *pf = vsi->back;
1154 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
1155 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
1156 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1157 	device_t		dev = iflib_get_dev(vsi->ctx);
1158 	struct i40e_hw		*hw = vsi->hw;
1159 	struct i40e_vsi_context	ctxt;
1160 	int 			tc_queues;
1161 	int			err = 0;
1162 
1163 	memset(&ctxt, 0, sizeof(ctxt));
1164 	ctxt.seid = vsi->seid;
1165 	if (pf->veb_seid != 0)
1166 		ctxt.uplink_seid = pf->veb_seid;
1167 	ctxt.pf_num = hw->pf_id;
1168 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1169 	if (err) {
1170 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1171 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1172 		return (err);
1173 	}
1174 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1175 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1176 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1177 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1178 	    ctxt.uplink_seid, ctxt.vsi_number,
1179 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
1180 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1181 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1182 	/*
1183 	** Set the queue and traffic class bits
1184 	**  - when multiple traffic classes are supported
1185 	**    this will need to be more robust.
1186 	*/
1187 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1188 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1189 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
1190 	ctxt.info.queue_mapping[0] = 0;
1191 	/*
1192 	 * This VSI will only use traffic class 0; start traffic class 0's
1193 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1194 	 * the driver may not use all of them).
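	 * For example, with 16 queues allocated, fls(16) - 1 = 4, so the
	 * traffic class is sized for 2^4 = 16 queues.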
1195 	 */
1196 	tc_queues = fls(pf->qtag.num_allocated) - 1;
1197 	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1198 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1199 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1200 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1201 
1202 	/* Set VLAN receive stripping mode */
1203 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1204 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1205 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1206 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1207 	else
1208 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1209 
1210 #ifdef IXL_IW
1211 	/* Set TCP Enable for iWARP capable VSI */
1212 	if (ixl_enable_iwarp && pf->iw_enabled) {
1213 		ctxt.info.valid_sections |=
1214 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1215 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1216 	}
1217 #endif
1218 	/* Save VSI number and info for use later */
1219 	vsi->vsi_num = ctxt.vsi_number;
1220 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1221 
1222 	/* Reset VSI statistics */
1223 	ixl_vsi_reset_stats(vsi);
1224 	vsi->hw_filters_add = 0;
1225 	vsi->hw_filters_del = 0;
1226 
1227 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1228 
1229 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1230 	if (err) {
1231 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1232 		    " aq_error %d\n", err, hw->aq.asq_last_status);
1233 		return (err);
1234 	}
1235 
1236 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1237 		struct tx_ring		*txr = &tx_que->txr;
1238 		struct i40e_hmc_obj_txq tctx;
1239 		u32			txctl;
1240 
1241 		/* Setup the HMC TX Context  */
1242 		bzero(&tctx, sizeof(tctx));
1243 		tctx.new_context = 1;
1244 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1245 		tctx.qlen = scctx->isc_ntxd[0];
1246 		tctx.fc_ena = 0;	/* Disable FCoE */
1247 		/*
		 * This value needs to be pulled from the VSI that this queue
1249 		 * is assigned to. Index into array is traffic class.
1250 		 */
1251 		tctx.rdylist = vsi->info.qs_handle[0];
1252 		/*
1253 		 * Set these to enable Head Writeback
1254 		 * - Address is last entry in TX ring (reserved for HWB index)
1255 		 * Leave these as 0 for Descriptor Writeback
1256 		 */
1257 		if (vsi->enable_head_writeback) {
1258 			tctx.head_wb_ena = 1;
1259 			tctx.head_wb_addr = txr->tx_paddr +
1260 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1261 		} else {
1262 			tctx.head_wb_ena = 0;
1263 			tctx.head_wb_addr = 0;
1264 		}
1265 		tctx.rdylist_act = 0;
1266 		err = i40e_clear_lan_tx_queue_context(hw, i);
1267 		if (err) {
1268 			device_printf(dev, "Unable to clear TX context\n");
1269 			break;
1270 		}
1271 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1272 		if (err) {
1273 			device_printf(dev, "Unable to set TX context\n");
1274 			break;
1275 		}
1276 		/* Associate the ring with this PF */
1277 		txctl = I40E_QTX_CTL_PF_QUEUE;
1278 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1279 		    I40E_QTX_CTL_PF_INDX_MASK);
1280 		wr32(hw, I40E_QTX_CTL(i), txctl);
1281 		ixl_flush(hw);
1282 
1283 		/* Do ring (re)init */
1284 		ixl_init_tx_ring(vsi, tx_que);
1285 	}
1286 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1287 		struct rx_ring 		*rxr = &rx_que->rxr;
1288 		struct i40e_hmc_obj_rxq rctx;
1289 
1290 		/* Next setup the HMC RX Context  */
1291 		if (scctx->isc_max_frame_size <= MCLBYTES)
1292 			rxr->mbuf_sz = MCLBYTES;
1293 		else
1294 			rxr->mbuf_sz = MJUMPAGESIZE;
1295 
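		/*
		 * The largest receivable frame is bounded by the buffer size
		 * times the number of buffers the hardware can chain.
		 */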
1296 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1297 
1298 		/* Set up an RX context for the HMC */
1299 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1300 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1301 		/* ignore header split for now */
1302 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1303 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1304 		    scctx->isc_max_frame_size : max_rxmax;
1305 		rctx.dtype = 0;
1306 		rctx.dsize = 1;		/* do 32byte descriptors */
1307 		rctx.hsplit_0 = 0;	/* no header split */
1308 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1309 		rctx.qlen = scctx->isc_nrxd[0];
1310 		rctx.tphrdesc_ena = 1;
1311 		rctx.tphwdesc_ena = 1;
1312 		rctx.tphdata_ena = 0;	/* Header Split related */
1313 		rctx.tphhead_ena = 0;	/* Header Split related */
1314 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
1315 		rctx.crcstrip = 1;
1316 		rctx.l2tsel = 1;
1317 		rctx.showiv = 1;	/* Strip inner VLAN header */
1318 		rctx.fc_ena = 0;	/* Disable FCoE */
1319 		rctx.prefena = 1;	/* Prefetch descriptors */
1320 
1321 		err = i40e_clear_lan_rx_queue_context(hw, i);
1322 		if (err) {
1323 			device_printf(dev,
1324 			    "Unable to clear RX context %d\n", i);
1325 			break;
1326 		}
1327 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1328 		if (err) {
1329 			device_printf(dev, "Unable to set RX context %d\n", i);
1330 			break;
1331 		}
1332 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
1333 	}
1334 	return (err);
1335 }
1336 
1337 void
1338 ixl_free_mac_filters(struct ixl_vsi *vsi)
1339 {
1340 	struct ixl_mac_filter *f;
1341 
1342 	while (!SLIST_EMPTY(&vsi->ftl)) {
1343 		f = SLIST_FIRST(&vsi->ftl);
1344 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
1345 		free(f, M_DEVBUF);
1346 	}
1347 }
1348 
1349 /*
** Provide an update to the queue RX
** interrupt moderation value.
1352 */
1353 void
1354 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1355 {
1356 	struct ixl_vsi	*vsi = que->vsi;
1357 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1358 	struct i40e_hw	*hw = vsi->hw;
1359 	struct rx_ring	*rxr = &que->rxr;
1360 	u16		rx_itr;
1361 	u16		rx_latency = 0;
1362 	int		rx_bytes;
1363 
1364 	/* Idle, do nothing */
1365 	if (rxr->bytes == 0)
1366 		return;
1367 
1368 	if (pf->dynamic_rx_itr) {
1369 		rx_bytes = rxr->bytes/rxr->itr;
1370 		rx_itr = rxr->itr;
1371 
1372 		/* Adjust latency range */
1373 		switch (rxr->latency) {
1374 		case IXL_LOW_LATENCY:
1375 			if (rx_bytes > 10) {
1376 				rx_latency = IXL_AVE_LATENCY;
1377 				rx_itr = IXL_ITR_20K;
1378 			}
1379 			break;
1380 		case IXL_AVE_LATENCY:
1381 			if (rx_bytes > 20) {
1382 				rx_latency = IXL_BULK_LATENCY;
1383 				rx_itr = IXL_ITR_8K;
1384 			} else if (rx_bytes <= 10) {
1385 				rx_latency = IXL_LOW_LATENCY;
1386 				rx_itr = IXL_ITR_100K;
1387 			}
1388 			break;
1389 		case IXL_BULK_LATENCY:
1390 			if (rx_bytes <= 20) {
1391 				rx_latency = IXL_AVE_LATENCY;
1392 				rx_itr = IXL_ITR_20K;
1393 			}
1394 			break;
		}
1396 
1397 		rxr->latency = rx_latency;
1398 
1399 		if (rx_itr != rxr->itr) {
1400 			/* do an exponential smoothing */
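			/*
			 * Worked example with hypothetical values: a new
			 * target rx_itr of 8 and a current rxr->itr of 40
			 * give (10*8*40) / ((9*8) + 40) = 3200/112 ~= 28,
			 * moving the ITR toward the new target without
			 * jumping straight to it.
			 */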
1401 			rx_itr = (10 * rx_itr * rxr->itr) /
1402 			    ((9 * rx_itr) + rxr->itr);
1403 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
1404 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1405 			    rxr->me), rxr->itr);
1406 		}
	} else { /* We may have toggled to non-dynamic */
1408 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1409 			vsi->rx_itr_setting = pf->rx_itr;
1410 		/* Update the hardware if needed */
1411 		if (rxr->itr != vsi->rx_itr_setting) {
1412 			rxr->itr = vsi->rx_itr_setting;
1413 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1414 			    rxr->me), rxr->itr);
1415 		}
1416 	}
1417 	rxr->bytes = 0;
1418 	rxr->packets = 0;
1419 }
1420 
1421 
1422 /*
** Provide an update to the queue TX
** interrupt moderation value.
1425 */
1426 void
1427 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1428 {
1429 	struct ixl_vsi	*vsi = que->vsi;
1430 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
1431 	struct i40e_hw	*hw = vsi->hw;
1432 	struct tx_ring	*txr = &que->txr;
1433 	u16		tx_itr;
1434 	u16		tx_latency = 0;
1435 	int		tx_bytes;
1436 
1438 	/* Idle, do nothing */
1439 	if (txr->bytes == 0)
1440 		return;
1441 
1442 	if (pf->dynamic_tx_itr) {
1443 		tx_bytes = txr->bytes/txr->itr;
1444 		tx_itr = txr->itr;
1445 
1446 		switch (txr->latency) {
1447 		case IXL_LOW_LATENCY:
1448 			if (tx_bytes > 10) {
1449 				tx_latency = IXL_AVE_LATENCY;
1450 				tx_itr = IXL_ITR_20K;
1451 			}
1452 			break;
1453 		case IXL_AVE_LATENCY:
1454 			if (tx_bytes > 20) {
1455 				tx_latency = IXL_BULK_LATENCY;
1456 				tx_itr = IXL_ITR_8K;
1457 			} else if (tx_bytes <= 10) {
1458 				tx_latency = IXL_LOW_LATENCY;
1459 				tx_itr = IXL_ITR_100K;
1460 			}
1461 			break;
1462 		case IXL_BULK_LATENCY:
1463 			if (tx_bytes <= 20) {
1464 				tx_latency = IXL_AVE_LATENCY;
1465 				tx_itr = IXL_ITR_20K;
1466 			}
1467 			break;
1468 		}
1469 
1470 		txr->latency = tx_latency;
1471 
1472 		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
1474 			tx_itr = (10 * tx_itr * txr->itr) /
1475 			    ((9 * tx_itr) + txr->itr);
1476 			txr->itr = min(tx_itr, IXL_MAX_ITR);
1477 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1478 			    txr->me), txr->itr);
1479 		}
1480 
	} else { /* We may have toggled to non-dynamic */
1482 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1483 			vsi->tx_itr_setting = pf->tx_itr;
1484 		/* Update the hardware if needed */
1485 		if (txr->itr != vsi->tx_itr_setting) {
1486 			txr->itr = vsi->tx_itr_setting;
1487 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1488 			    txr->me), txr->itr);
1489 		}
1490 	}
1491 	txr->bytes = 0;
1492 	txr->packets = 0;
1493 	return;
1494 }
1495 
1496 void
1497 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
1498     struct sysctl_ctx_list *ctx, const char *sysctl_name)
1499 {
1500 	struct sysctl_oid *tree;
1501 	struct sysctl_oid_list *child;
1502 	struct sysctl_oid_list *vsi_list;
1503 
1504 	tree = device_get_sysctl_tree(pf->dev);
1505 	child = SYSCTL_CHILDREN(tree);
1506 	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
1507 				   CTLFLAG_RD, NULL, "VSI Number");
1508 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
1509 
1510 	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
1511 }
1512 
1513 #ifdef IXL_DEBUG
1514 /**
1515  * ixl_sysctl_qtx_tail_handler
1516  * Retrieves I40E_QTX_TAIL value from hardware
1517  * for a sysctl.
1518  */
1519 int
1520 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1521 {
1522 	struct ixl_tx_queue *tx_que;
1523 	int error;
1524 	u32 val;
1525 
1526 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
	if (!tx_que)
		return (0);
1528 
1529 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1530 	error = sysctl_handle_int(oidp, &val, 0, req);
1531 	if (error || !req->newptr)
		return (error);
1533 	return (0);
1534 }
1535 
1536 /**
1537  * ixl_sysctl_qrx_tail_handler
1538  * Retrieves I40E_QRX_TAIL value from hardware
1539  * for a sysctl.
1540  */
1541 int
1542 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1543 {
1544 	struct ixl_rx_queue *rx_que;
1545 	int error;
1546 	u32 val;
1547 
1548 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
	if (!rx_que)
		return (0);
1550 
1551 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1552 	error = sysctl_handle_int(oidp, &val, 0, req);
1553 	if (error || !req->newptr)
		return (error);
1555 	return (0);
1556 }
1557 #endif
1558 
1559 /*
1560  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1561  * Writes to the ITR registers immediately.
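 *
 * Usage example (assuming the handler is attached under the device's
 * sysctl tree as "tx_itr"): sysctl dev.ixl.0.tx_itr=100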
1562  */
1563 static int
1564 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1565 {
1566 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1567 	device_t dev = pf->dev;
1568 	int error = 0;
1569 	int requested_tx_itr;
1570 
1571 	requested_tx_itr = pf->tx_itr;
1572 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1573 	if ((error) || (req->newptr == NULL))
1574 		return (error);
1575 	if (pf->dynamic_tx_itr) {
1576 		device_printf(dev,
1577 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
1579 	}
1580 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1581 		device_printf(dev,
1582 		    "Invalid TX itr value; value must be between 0 and %d\n",
1583 		        IXL_MAX_ITR);
1584 		return (EINVAL);
1585 	}
1586 
1587 	pf->tx_itr = requested_tx_itr;
1588 	ixl_configure_tx_itr(pf);
1589 
1590 	return (error);
1591 }
1592 
1593 /*
1594  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1595  * Writes to the ITR registers immediately.
1596  */
1597 static int
1598 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1599 {
1600 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1601 	device_t dev = pf->dev;
1602 	int error = 0;
1603 	int requested_rx_itr;
1604 
1605 	requested_rx_itr = pf->rx_itr;
1606 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1607 	if ((error) || (req->newptr == NULL))
1608 		return (error);
1609 	if (pf->dynamic_rx_itr) {
1610 		device_printf(dev,
1611 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
1613 	}
1614 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1615 		device_printf(dev,
1616 		    "Invalid RX itr value; value must be between 0 and %d\n",
1617 		        IXL_MAX_ITR);
1618 		return (EINVAL);
1619 	}
1620 
1621 	pf->rx_itr = requested_rx_itr;
1622 	ixl_configure_rx_itr(pf);
1623 
1624 	return (error);
1625 }
1626 
1627 void
1628 ixl_add_hw_stats(struct ixl_pf *pf)
1629 {
1630 	struct ixl_vsi *vsi = &pf->vsi;
1631 	device_t dev = iflib_get_dev(vsi->ctx);
1632 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
1633 
1634 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1635 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1636 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1637 	struct sysctl_oid_list *vsi_list, *queue_list;
1638 	struct sysctl_oid *queue_node;
	char queue_namebuf[QUEUE_NAME_LEN];
1640 
1641 	struct ixl_rx_queue *rx_que;
1642 	struct ixl_tx_queue *tx_que;
1643 	struct tx_ring *txr;
1644 	struct rx_ring *rxr;
1645 
1646 	/* Driver statistics */
1647 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
1648 			CTLFLAG_RD, &pf->watchdog_events,
1649 			"Watchdog timeouts");
1650 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1651 			CTLFLAG_RD, &pf->admin_irq,
1652 			"Admin Queue IRQ Handled");
1653 
1654 	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
1655 	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
1656 
1657 	/* Queue statistics */
1658 	for (int q = 0; q < vsi->num_rx_queues; q++) {
1659 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
1660 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1661 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
1662 		queue_list = SYSCTL_CHILDREN(queue_node);
1663 
1664 		rx_que = &(vsi->rx_queues[q]);
1665 		rxr = &(rx_que->rxr);
1666 
1668 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1669 				CTLFLAG_RD, &(rx_que->irqs),
1670 				"irqs on this queue (both Tx and Rx)");
1671 
1672 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1673 				CTLFLAG_RD, &(rxr->rx_packets),
1674 				"Queue Packets Received");
1675 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1676 				CTLFLAG_RD, &(rxr->rx_bytes),
1677 				"Queue Bytes Received");
1678 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
1679 				CTLFLAG_RD, &(rxr->desc_errs),
1680 				"Queue Rx Descriptor Errors");
1681 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1682 				CTLFLAG_RD, &(rxr->itr), 0,
1683 				"Queue Rx ITR Interval");
1684 #ifdef IXL_DEBUG
1685 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
1686 				CTLTYPE_UINT | CTLFLAG_RD, rx_que,
1687 				sizeof(struct ixl_rx_queue),
1688 				ixl_sysctl_qrx_tail_handler, "IU",
1689 				"Queue Receive Descriptor Tail");
1690 #endif
1691 	}
1692 	for (int q = 0; q < vsi->num_tx_queues; q++) {
1693 		snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
1694 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1695 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
1696 		queue_list = SYSCTL_CHILDREN(queue_node);
1697 
1698 		tx_que = &(vsi->tx_queues[q]);
1699 		txr = &(tx_que->txr);
1700 
1701 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
1702 				CTLFLAG_RD, &(tx_que->tso),
1703 				"TSO Packets Transmitted");
1704 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
1705 				CTLFLAG_RD, &(txr->mss_too_small),
1706 				"TSO sends with an MSS less than 64");
1707 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1708 				CTLFLAG_RD, &(txr->tx_packets),
1709 				"Queue Packets Transmitted");
1710 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1711 				CTLFLAG_RD, &(txr->tx_bytes),
1712 				"Queue Bytes Transmitted");
1713 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1714 				CTLFLAG_RD, &(txr->itr), 0,
1715 				"Queue Tx ITR Interval");
1716 #ifdef IXL_DEBUG
1717 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
1718 				CTLTYPE_UINT | CTLFLAG_RD, tx_que,
1719 				sizeof(struct ixl_tx_queue),
1720 				ixl_sysctl_qtx_tail_handler, "IU",
1721 				"Queue Transmit Descriptor Tail");
1722 #endif
1723 	}
1724 
1725 	/* MAC stats */
1726 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
1727 }
1728 
1729 void
1730 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
1731 	struct sysctl_oid_list *child,
1732 	struct i40e_eth_stats *eth_stats)
1733 {
1734 	struct ixl_sysctl_info ctls[] =
1735 	{
1736 		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
1737 		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
1738 			"Unicast Packets Received"},
1739 		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
1740 			"Multicast Packets Received"},
1741 		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
1742 			"Broadcast Packets Received"},
1743 		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
1744 		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
1745 		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
1746 		{&eth_stats->tx_multicast, "mcast_pkts_txd",
1747 			"Multicast Packets Transmitted"},
1748 		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
1749 			"Broadcast Packets Transmitted"},
1750 		/* End */
1751 		{0, 0, 0}
1752 	};
1753 
1754 	struct ixl_sysctl_info *entry = ctls;
1755 	while (entry->stat != 0)
1756 	{
1757 		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
1758 				CTLFLAG_RD, entry->stat,
1759 				entry->description);
1760 		entry++;
1761 	}
1762 }
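
/*
 * Usage sketch (illustrative, not from the original source): the
 * sentinel-terminated ixl_sysctl_info table above lets a caller
 * register a whole group of read-only 64-bit counters in one call.
 * A VSI attach path might do:
 *
 *	struct sysctl_oid_list *vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
 *
 *	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
 *
 * Each table entry becomes one SYSCTL_ADD_UQUAD() leaf; the {0, 0, 0}
 * terminator stops the loop, so adding a stat only takes a new row.
 */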
1763 
1764 void
1765 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1766 	struct sysctl_oid_list *child,
1767 	struct i40e_hw_port_stats *stats)
1768 {
1769 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1770 				    CTLFLAG_RD, NULL, "Mac Statistics");
1771 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1772 
1773 	struct i40e_eth_stats *eth_stats = &stats->eth;
1774 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1775 
1776 	struct ixl_sysctl_info ctls[] =
1777 	{
1778 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1779 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1780 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1781 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1782 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1783 		/* Packet Reception Stats */
1784 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1785 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1786 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1787 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1788 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1789 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1790 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1791 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1792 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1793 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1794 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1795 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1796 		/* Packet Transmission Stats */
1797 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1798 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1799 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1800 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1801 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1802 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1803 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1804 		/* Flow control */
1805 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1806 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1807 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1808 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1809 		/* End */
1810 		{0, 0, 0}
1811 	};
1812 
1813 	struct ixl_sysctl_info *entry = ctls;
1814 	while (entry->stat != 0)
1815 	{
1816 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1817 				CTLFLAG_RD, entry->stat,
1818 				entry->description);
1819 		entry++;
1820 	}
1821 }
1822 
1823 void
1824 ixl_set_rss_key(struct ixl_pf *pf)
1825 {
1826 	struct i40e_hw *hw = &pf->hw;
1827 	struct ixl_vsi *vsi = &pf->vsi;
1828 	device_t	dev = pf->dev;
1829 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1830 	enum i40e_status_code status;
1831 
1832 #ifdef RSS
1833 	/* Fetch the configured RSS key */
1834 	rss_getkey((uint8_t *) &rss_seed);
1835 #else
1836 	ixl_get_default_rss_key(rss_seed);
1837 #endif
1838 	/* Fill out hash function seed */
1839 	if (hw->mac.type == I40E_MAC_X722) {
1840 		struct i40e_aqc_get_set_rss_key_data key_data;
1841 		bcopy(rss_seed, &key_data, sizeof(key_data));
1842 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1843 		if (status)
1844 			device_printf(dev,
1845 			    "i40e_aq_set_rss_key status %s, error %s\n",
1846 			    i40e_stat_str(hw, status),
1847 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1848 	} else {
1849 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1850 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1851 	}
1852 }
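
/*
 * Editorial note (sketch, not in the original source): the seed buffer
 * above is IXL_RSS_KEY_SIZE_REG (13) 32-bit registers, i.e. 52 bytes,
 * which matches sizeof(struct i40e_aqc_get_set_rss_key_data): a 40-byte
 * standard key plus a 12-byte extended hash key.  A compile-time check
 * of that assumption could look like:
 *
 *	_Static_assert(sizeof(struct i40e_aqc_get_set_rss_key_data) ==
 *	    IXL_RSS_KEY_SIZE_REG * sizeof(u32), "RSS key size mismatch");
 */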
1853 
1854 /*
1855  * Configure enabled PCTYPES for RSS.
1856  */
1857 void
1858 ixl_set_rss_pctypes(struct ixl_pf *pf)
1859 {
1860 	struct i40e_hw *hw = &pf->hw;
1861 	u64		set_hena = 0, hena;
1862 
1863 #ifdef RSS
1864 	u32		rss_hash_config;
1865 
1866 	rss_hash_config = rss_gethashconfig();
1867 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1868 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1869 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1870 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1871 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1872 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1873 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1874 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1875 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1876 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1877 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1878 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1879 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1880 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1881 #else
1882 	if (hw->mac.type == I40E_MAC_X722)
1883 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1884 	else
1885 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1886 #endif
1887 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1888 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1889 	hena |= set_hena;
1890 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1891 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1893 }
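
/*
 * Worked example (editorial): HENA is a 64-bit bitmask, one bit per
 * packet classifier type, split across two 32-bit registers.  Enabling
 * only TCP/IPv4 and UDP/IPv4 hashing would amount to:
 *
 *	u64 hena = ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
 *	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 *
 * Note the function above ORs the new bits into the registers' current
 * contents rather than overwriting them.
 */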
1894 
1895 void
1896 ixl_set_rss_hlut(struct ixl_pf *pf)
1897 {
1898 	struct i40e_hw	*hw = &pf->hw;
1899 	struct ixl_vsi *vsi = &pf->vsi;
1900 	device_t	dev = iflib_get_dev(vsi->ctx);
1901 	int		i, que_id;
1902 	int		lut_entry_width;
1903 	u32		lut = 0;
1904 	enum i40e_status_code status;
1905 
1906 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1907 
1908 	/* Populate the LUT with the max number of queues, in round-robin fashion */
1909 	u8 hlut_buf[512];
1910 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1911 #ifdef RSS
1912 		/*
1913 		 * Fetch the RSS bucket id for the given indirection entry.
1914 		 * Cap it at the number of configured buckets (which is
1915 		 * num_queues.)
1916 		 */
1917 		que_id = rss_get_indirection_to_bucket(i);
1918 		que_id = que_id % vsi->num_rx_queues;
1919 #else
1920 		que_id = i % vsi->num_rx_queues;
1921 #endif
1922 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
1923 		hlut_buf[i] = lut;
1924 	}
1925 
1926 	if (hw->mac.type == I40E_MAC_X722) {
1927 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1928 		if (status)
1929 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1930 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1931 	} else {
1932 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1933 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1934 		ixl_flush(hw);
1935 	}
1936 }
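
/*
 * Worked example (editorial): with num_rx_queues == 4 and a 512-entry
 * table, the population loop above yields
 *
 *	hlut_buf[] = 0, 1, 2, 3, 0, 1, 2, 3, ...
 *
 * The mask ((0x1 << lut_entry_width) - 1) just truncates each queue id
 * to the per-entry width the hardware stores; for the usual widths it
 * leaves this round-robin pattern unchanged.
 */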
1937 
1938 /*
1939 ** Setup the PF's RSS parameters.
1940 */
1941 void
1942 ixl_config_rss(struct ixl_pf *pf)
1943 {
1944 	ixl_set_rss_key(pf);
1945 	ixl_set_rss_pctypes(pf);
1946 	ixl_set_rss_hlut(pf);
1947 }
1948 
1949 /*
1950 ** This routine updates VLAN filters. It is called by init
1951 ** after a soft reset; it scans the filter table and then
1952 ** updates the hardware.
1953 */
1954 void
1955 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1956 {
1957 	struct ixl_mac_filter	*f;
1958 	int			cnt = 0, flags;
1959 
1960 	if (vsi->num_vlans == 0)
1961 		return;
1962 	/*
1963 	** Scan the filter list for vlan entries,
1964 	** mark them for addition, and then issue
1965 	** the AQ update.
1966 	*/
1967 	SLIST_FOREACH(f, &vsi->ftl, next) {
1968 		if (f->flags & IXL_FILTER_VLAN) {
1969 			f->flags |=
1970 			    (IXL_FILTER_ADD |
1971 			    IXL_FILTER_USED);
1972 			cnt++;
1973 		}
1974 	}
1975 	if (cnt == 0) {
1976 		printf("setup vlan: no filters found!\n");
1977 		return;
1978 	}
1979 	flags = IXL_FILTER_VLAN;
1980 	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1981 	ixl_add_hw_filters(vsi, flags, cnt);
1982 }
1983 
1984 /*
1985  * Some firmware versions configure a default MAC/VLAN filter
1986  * that interferes with the filters managed by the driver.
1987  * Make sure it's removed.
1988  */
1989 static void
1990 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1991 {
1992 	struct i40e_aqc_remove_macvlan_element_data e;
1993 
1994 	bzero(&e, sizeof(e));
1995 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1996 	e.vlan_tag = 0;
1997 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1998 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1999 
2000 	bzero(&e, sizeof(e));
2001 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
2002 	e.vlan_tag = 0;
2003 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2004 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2005 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
2006 }
2007 
2008 /*
2009 ** Initialize filter list and add filters that the hardware
2010 ** needs to know about.
2011 **
2012 ** Requires VSI's filter list & seid to be set before calling.
2013 */
2014 void
2015 ixl_init_filters(struct ixl_vsi *vsi)
2016 {
2017 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2018 
2019 	/* Initialize mac filter list for VSI */
2020 	SLIST_INIT(&vsi->ftl);
2021 
2022 	/* Receive broadcast Ethernet frames */
2023 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
2024 
2025 	ixl_del_default_hw_filters(vsi);
2026 
2027 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
2028 	/*
2029 	 * Prevent Tx flow control frames from being sent out by
2030 	 * non-firmware transmitters.
2031 	 * This affects every VSI in the PF.
2032 	 */
2033 	if (pf->enable_tx_fc_filter)
2034 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
2035 }
2036 
2037 /*
2038 ** This routine adds multicast filters
2039 */
2040 void
2041 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
2042 {
2043 	struct ixl_mac_filter *f;
2044 
2045 	/* Does one already exist? */
2046 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
2047 	if (f != NULL)
2048 		return;
2049 
2050 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
2051 	if (f != NULL)
2052 		f->flags |= IXL_FILTER_MC;
2053 	else
2054 		printf("WARNING: no filter available!\n");
2055 
2056 	return;
2057 }
2058 
2059 void
2060 ixl_reconfigure_filters(struct ixl_vsi *vsi)
2061 {
2062 	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
2063 }
2064 
2065 /*
2066 ** This routine adds macvlan filters
2067 */
2068 void
2069 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2070 {
2071 	struct ixl_mac_filter	*f, *tmp;
2072 	struct ixl_pf		*pf;
2073 	device_t		dev;
2074 
2075 	DEBUGOUT("ixl_add_filter: begin");
2076 
2077 	pf = vsi->back;
2078 	dev = pf->dev;
2079 
2080 	/* Does one already exist? */
2081 	f = ixl_find_filter(vsi, macaddr, vlan);
2082 	if (f != NULL)
2083 		return;
2084 	/*
2085 	** If this is the first vlan being registered, we need
2086 	** to remove the ANY filter that indicates we are not
2087 	** in a vlan, and replace it with a vlan-0 filter.
2088 	*/
2089 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
2090 		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
2091 		if (tmp != NULL) {
2092 			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
2093 			ixl_add_filter(vsi, macaddr, 0);
2094 		}
2095 	}
2096 
2097 	f = ixl_new_filter(vsi, macaddr, vlan);
2098 	if (f == NULL) {
2099 		device_printf(dev, "WARNING: no filter available!\n");
2100 		return;
2101 	}
2102 	if (f->vlan != IXL_VLAN_ANY)
2103 		f->flags |= IXL_FILTER_VLAN;
2104 	else
2105 		vsi->num_macs++;
2106 
2107 	ixl_add_hw_filters(vsi, f->flags, 1);
2108 	return;
2109 }
2110 
2111 void
2112 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2113 {
2114 	struct ixl_mac_filter *f;
2115 
2116 	f = ixl_find_filter(vsi, macaddr, vlan);
2117 	if (f == NULL)
2118 		return;
2119 
2120 	f->flags |= IXL_FILTER_DEL;
2121 	ixl_del_hw_filters(vsi, 1);
2122 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2123 		vsi->num_macs--;
2124 
2125 	/* Check if this is the last vlan removal */
2126 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2127 		/* Switch back to a non-vlan filter */
2128 		ixl_del_filter(vsi, macaddr, 0);
2129 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2130 	}
2131 	return;
2132 }
2133 
2134 /*
2135 ** Find the filter with both matching mac addr and vlan id
2136 */
2137 struct ixl_mac_filter *
2138 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2139 {
2140 	struct ixl_mac_filter	*f;
2141 
2142 	SLIST_FOREACH(f, &vsi->ftl, next) {
2143 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2144 		    && (f->vlan == vlan)) {
2145 			return (f);
2146 		}
2147 	}
2148 
2149 	return (NULL);
2150 }
2151 
2152 /*
2153 ** This routine takes additions to the vsi filter
2154 ** table and creates an Admin Queue call to create
2155 ** the filters in the hardware.
2156 */
2157 void
2158 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2159 {
2160 	struct i40e_aqc_add_macvlan_element_data *a, *b;
2161 	struct ixl_mac_filter	*f;
2162 	struct ixl_pf		*pf;
2163 	struct i40e_hw		*hw;
2164 	device_t		dev;
2165 	enum i40e_status_code	status;
2166 	int			j = 0;
2167 
2168 	MPASS(cnt > 0);
2169 
2170 	pf = vsi->back;
2171 	dev = iflib_get_dev(vsi->ctx);
2172 	hw = &pf->hw;
2173 
2174 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2175 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2176 	if (a == NULL) {
2177 		device_printf(dev, "add_hw_filters failed to get memory\n");
2178 		return;
2179 	}
2180 
2181 	/*
2182 	** Scan the filter list; each time we find a match,
2183 	** add it to the admin queue array and clear the
2184 	** add bit.
2185 	*/
2186 	SLIST_FOREACH(f, &vsi->ftl, next) {
2187 		if ((f->flags & flags) == flags) {
2188 			b = &a[j];	/* shorter alias for this element */
2189 			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2190 			if (f->vlan == IXL_VLAN_ANY) {
2191 				b->vlan_tag = 0;
2192 				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2193 			} else {
2194 				b->vlan_tag = f->vlan;
2195 				b->flags = 0;
2196 			}
2197 			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2198 			f->flags &= ~IXL_FILTER_ADD;
2199 			j++;
2200 		}
2201 		if (j == cnt)
2202 			break;
2203 	}
2204 	if (j > 0) {
2205 		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2206 		if (status)
2207 			device_printf(dev, "i40e_aq_add_macvlan status %s, "
2208 			    "error %s\n", i40e_stat_str(hw, status),
2209 			    i40e_aq_str(hw, hw->aq.asq_last_status));
2210 		else
2211 			vsi->hw_filters_add += j;
2212 	}
2213 	free(a, M_DEVBUF);
2214 	return;
2215 }
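
/*
 * Illustrative encoding (assumption, mirroring the loop above): a
 * match-any-VLAN filter and a specific-VLAN filter for a hypothetical
 * "mac" address differ only in vlan_tag and the IGNORE_VLAN flag:
 *
 *	struct i40e_aqc_add_macvlan_element_data b = { 0 };
 *	bcopy(mac, b.mac_addr, ETHER_ADDR_LEN);
 *	b.flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
 *	    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;	// any VLAN
 *	// vs: b.vlan_tag = 100;
 *	//     b.flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
 */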
2216 
2217 /*
2218 ** This routine takes removals in the vsi filter
2219 ** table and creates an Admin Queue call to delete
2220 ** the filters in the hardware.
2221 */
2222 void
2223 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2224 {
2225 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
2226 	struct ixl_pf		*pf;
2227 	struct i40e_hw		*hw;
2228 	device_t		dev;
2229 	struct ixl_mac_filter	*f, *f_temp;
2230 	enum i40e_status_code	status;
2231 	int			j = 0;
2232 
2233 	pf = vsi->back;
2234 	hw = &pf->hw;
2235 	dev = iflib_get_dev(vsi->ctx);
2236 
2237 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2238 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2239 	if (d == NULL) {
2240 		device_printf(dev, "%s: failed to get memory\n", __func__);
2241 		return;
2242 	}
2243 
2244 	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2245 		if (f->flags & IXL_FILTER_DEL) {
2246 			e = &d[j];	/* shorter alias for this element */
2247 			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2248 			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2249 			if (f->vlan == IXL_VLAN_ANY) {
2250 				e->vlan_tag = 0;
2251 				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2252 			} else {
2253 				e->vlan_tag = f->vlan;
2254 			}
2255 			/* delete entry from vsi list */
2256 			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2257 			free(f, M_DEVBUF);
2258 			j++;
2259 		}
2260 		if (j == cnt)
2261 			break;
2262 	}
2263 	if (j > 0) {
2264 		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2265 		if (status) {
2266 			int sc = 0;
2267 			for (int i = 0; i < j; i++)
2268 				sc += (!d[i].error_code);
2269 			vsi->hw_filters_del += sc;
2270 			device_printf(dev,
2271 			    "Failed to remove %d/%d filters, error %s\n",
2272 			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2273 		} else
2274 			vsi->hw_filters_del += j;
2275 	}
2276 	free(d, M_DEVBUF);
2277 	return;
2278 }
2279 
2280 int
2281 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2282 {
2283 	struct i40e_hw	*hw = &pf->hw;
2284 	int		error = 0;
2285 	u32		reg;
2286 	u16		pf_qidx;
2287 
2288 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2289 
2290 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2291 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2292 	    pf_qidx, vsi_qidx);
2293 
2294 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2295 
2296 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2297 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2298 	    I40E_QTX_ENA_QENA_STAT_MASK;
2299 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2300 	/* Verify the enable took */
2301 	for (int j = 0; j < 10; j++) {
2302 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2303 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2304 			break;
2305 		i40e_usec_delay(10);
2306 	}
2307 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2308 		device_printf(pf->dev, "TX queue %d still disabled!\n",
2309 		    pf_qidx);
2310 		error = ETIMEDOUT;
2311 	}
2312 
2313 	return (error);
2314 }
2315 
2316 int
2317 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2318 {
2319 	struct i40e_hw	*hw = &pf->hw;
2320 	int		error = 0;
2321 	u32		reg;
2322 	u16		pf_qidx;
2323 
2324 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2325 
2326 	ixl_dbg(pf, IXL_DBG_EN_DIS,
2327 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2328 	    pf_qidx, vsi_qidx);
2329 
2330 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2331 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2332 	    I40E_QRX_ENA_QENA_STAT_MASK;
2333 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2334 	/* Verify the enable took */
2335 	for (int j = 0; j < 10; j++) {
2336 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2337 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2338 			break;
2339 		i40e_usec_delay(10);
2340 	}
2341 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2342 		device_printf(pf->dev, "RX queue %d still disabled!\n",
2343 		    pf_qidx);
2344 		error = ETIMEDOUT;
2345 	}
2346 
2347 	return (error);
2348 }
2349 
2350 int
2351 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2352 {
2353 	int error = 0;
2354 
2355 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2356 	/* Called function already prints error message */
2357 	if (error)
2358 		return (error);
2359 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2360 	return (error);
2361 }
2362 
2363 /* For PF VSI only */
2364 int
2365 ixl_enable_rings(struct ixl_vsi *vsi)
2366 {
2367 	struct ixl_pf	*pf = vsi->back;
2368 	int		error = 0;
2369 
2370 	for (int i = 0; i < vsi->num_tx_queues; i++)
2371 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2372 
2373 	for (int i = 0; i < vsi->num_rx_queues; i++)
2374 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2375 
2376 	return (error);
2377 }
2378 
2379 /*
2380  * Returns an error if the ring fails to disable within the timeout.
2381  */
2382 int
2383 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2384 {
2385 	struct i40e_hw	*hw = &pf->hw;
2386 	int		error = 0;
2387 	u32		reg;
2388 	u16		pf_qidx;
2389 
2390 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2391 
2392 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2393 	i40e_usec_delay(500);
2394 
2395 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2396 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2397 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2398 	/* Verify the disable took */
2399 	for (int j = 0; j < 10; j++) {
2400 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2401 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2402 			break;
2403 		i40e_msec_delay(10);
2404 	}
2405 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2406 		device_printf(pf->dev, "TX queue %d still enabled!\n",
2407 		    pf_qidx);
2408 		error = ETIMEDOUT;
2409 	}
2410 
2411 	return (error);
2412 }
2413 
2414 /*
2415  * Returns an error if the ring fails to disable within the timeout.
2416  */
2417 int
2418 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2419 {
2420 	struct i40e_hw	*hw = &pf->hw;
2421 	int		error = 0;
2422 	u32		reg;
2423 	u16		pf_qidx;
2424 
2425 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2426 
2427 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2428 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2429 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2430 	/* Verify the disable took */
2431 	for (int j = 0; j < 10; j++) {
2432 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2433 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2434 			break;
2435 		i40e_msec_delay(10);
2436 	}
2437 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2438 		device_printf(pf->dev, "RX queue %d still enabled!\n",
2439 		    pf_qidx);
2440 		error = ETIMEDOUT;
2441 	}
2442 
2443 	return (error);
2444 }
2445 
2446 int
2447 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2448 {
2449 	int error = 0;
2450 
2451 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2452 	/* Called function already prints error message */
2453 	if (error)
2454 		return (error);
2455 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
2456 	return (error);
2457 }
2458 
2459 /* For PF VSI only */
2460 int
2461 ixl_disable_rings(struct ixl_vsi *vsi)
2462 {
2463 	struct ixl_pf	*pf = vsi->back;
2464 	int		error = 0;
2465 
2466 	for (int i = 0; i < vsi->num_tx_queues; i++)
2467 		error = ixl_disable_tx_ring(pf, &pf->qtag, i);
2468 
2469 	for (int i = 0; i < vsi->num_rx_queues; i++)
2470 		error = ixl_disable_rx_ring(pf, &pf->qtag, i);
2471 
2472 	return (error);
2473 }
2474 
2475 /**
2476  * ixl_handle_mdd_event
2477  *
2478  * Called from the interrupt handler to identify possibly malicious VFs
2479  * (it also detects events from the PF itself).
2480  **/
2481 void
2482 ixl_handle_mdd_event(struct ixl_pf *pf)
2483 {
2484 	struct i40e_hw *hw = &pf->hw;
2485 	device_t dev = pf->dev;
2486 	struct ixl_vf *vf;
2487 	bool mdd_detected = false;
2488 	bool pf_mdd_detected = false;
2489 	bool vf_mdd_detected = false;
2490 	u32 reg;
2491 
2492 	/* find what triggered the MDD event */
2493 	reg = rd32(hw, I40E_GL_MDET_TX);
2494 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2495 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2496 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
2497 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2498 				I40E_GL_MDET_TX_EVENT_SHIFT;
2499 		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2500 				I40E_GL_MDET_TX_QUEUE_SHIFT;
2501 		device_printf(dev,
2502 		    "Malicious Driver Detection event %d"
2503 		    " on TX queue %d, pf number %d\n",
2504 		    event, queue, pf_num);
2505 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2506 		mdd_detected = true;
2507 	}
2508 	reg = rd32(hw, I40E_GL_MDET_RX);
2509 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2510 		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2511 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
2512 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2513 				I40E_GL_MDET_RX_EVENT_SHIFT;
2514 		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2515 				I40E_GL_MDET_RX_QUEUE_SHIFT;
2516 		device_printf(dev,
2517 		    "Malicious Driver Detection event %d"
2518 		    " on RX queue %d, pf number %d\n",
2519 		    event, queue, pf_num);
2520 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2521 		mdd_detected = true;
2522 	}
2523 
2524 	if (mdd_detected) {
2525 		reg = rd32(hw, I40E_PF_MDET_TX);
2526 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2527 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2528 			device_printf(dev,
2529 			    "MDD TX event is for this function!\n");
2530 			pf_mdd_detected = true;
2531 		}
2532 		reg = rd32(hw, I40E_PF_MDET_RX);
2533 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2534 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2535 			device_printf(dev,
2536 			    "MDD RX event is for this function!\n");
2537 			pf_mdd_detected = true;
2538 		}
2539 	}
2540 
2541 	if (pf_mdd_detected) {
2542 		atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
2543 		goto end;
2544 	}
2545 
2546 	/* Handle VF detection */
2547 	for (int i = 0; i < pf->num_vfs && mdd_detected; i++) {
2548 		vf = &(pf->vfs[i]);
2549 		reg = rd32(hw, I40E_VP_MDET_TX(i));
2550 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2551 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2552 			vf->num_mdd_events++;
2553 			device_printf(dev, "MDD TX event is for VF %d\n", i);
2554 			vf_mdd_detected = true;
2555 		}
2556 
2557 		reg = rd32(hw, I40E_VP_MDET_RX(i));
2558 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2559 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2560 			vf->num_mdd_events++;
2561 			device_printf(dev, "MDD RX event is for VF %d\n", i);
2562 			vf_mdd_detected = true;
2563 		}
2564 
2565 		/* TODO: Disable the VF if there are too many MDD events from it */
2566 	}
2567 
2568 	if (vf_mdd_detected)
2569 		atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
2570 
2571 end:
2572 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2573 
2574 	/* re-enable mdd interrupt cause */
2575 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2576 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2577 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2578 	ixl_flush(hw);
2579 }
2580 
2581 /* This only enables HW interrupts for the RX queues */
2582 void
2583 ixl_enable_intr(struct ixl_vsi *vsi)
2584 {
2585 	struct i40e_hw		*hw = vsi->hw;
2586 	struct ixl_rx_queue	*que = vsi->rx_queues;
2587 
2588 	/* TODO: Check iflib interrupt mode instead? */
2589 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2590 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2591 			ixl_enable_queue(hw, que->rxr.me);
2592 	} else
2593 		ixl_enable_intr0(hw);
2594 }
2595 
2596 void
2597 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2598 {
2599 	struct i40e_hw		*hw = vsi->hw;
2600 	struct ixl_rx_queue	*que = vsi->rx_queues;
2601 
2602 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2603 		ixl_disable_queue(hw, que->rxr.me);
2604 }
2605 
2606 void
2607 ixl_enable_intr0(struct i40e_hw *hw)
2608 {
2609 	u32		reg;
2610 
2611 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2612 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2613 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2614 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2615 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2616 }
2617 
2618 void
2619 ixl_disable_intr0(struct i40e_hw *hw)
2620 {
2621 	u32		reg;
2622 
2623 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2624 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2625 	ixl_flush(hw);
2626 }
2627 
2628 void
2629 ixl_enable_queue(struct i40e_hw *hw, int id)
2630 {
2631 	u32		reg;
2632 
2633 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2634 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2635 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2636 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2637 }
2638 
2639 void
2640 ixl_disable_queue(struct i40e_hw *hw, int id)
2641 {
2642 	u32		reg;
2643 
2644 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2645 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2646 }
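
/*
 * Sketch (editorial): each RX queue owns one I40E_PFINT_DYN_CTLN(n)
 * register, so re-arming queue 3 after servicing its interrupt is just:
 *
 *	ixl_enable_queue(hw, 3);
 *
 * That write sets INTENA | CLEARPBA while pointing ITR_INDX at
 * IXL_ITR_NONE, so the interrupt throttle rate configured elsewhere is
 * left untouched.
 */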
2647 
2648 void
2649 ixl_update_stats_counters(struct ixl_pf *pf)
2650 {
2651 	struct i40e_hw	*hw = &pf->hw;
2652 	struct ixl_vsi	*vsi = &pf->vsi;
2653 	struct ixl_vf	*vf;
2654 
2655 	struct i40e_hw_port_stats *nsd = &pf->stats;
2656 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2657 
2658 	/* Update hw stats */
2659 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2660 			   pf->stat_offsets_loaded,
2661 			   &osd->crc_errors, &nsd->crc_errors);
2662 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2663 			   pf->stat_offsets_loaded,
2664 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2665 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2666 			   I40E_GLPRT_GORCL(hw->port),
2667 			   pf->stat_offsets_loaded,
2668 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2669 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2670 			   I40E_GLPRT_GOTCL(hw->port),
2671 			   pf->stat_offsets_loaded,
2672 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2673 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2674 			   pf->stat_offsets_loaded,
2675 			   &osd->eth.rx_discards,
2676 			   &nsd->eth.rx_discards);
2677 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2678 			   I40E_GLPRT_UPRCL(hw->port),
2679 			   pf->stat_offsets_loaded,
2680 			   &osd->eth.rx_unicast,
2681 			   &nsd->eth.rx_unicast);
2682 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2683 			   I40E_GLPRT_UPTCL(hw->port),
2684 			   pf->stat_offsets_loaded,
2685 			   &osd->eth.tx_unicast,
2686 			   &nsd->eth.tx_unicast);
2687 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2688 			   I40E_GLPRT_MPRCL(hw->port),
2689 			   pf->stat_offsets_loaded,
2690 			   &osd->eth.rx_multicast,
2691 			   &nsd->eth.rx_multicast);
2692 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2693 			   I40E_GLPRT_MPTCL(hw->port),
2694 			   pf->stat_offsets_loaded,
2695 			   &osd->eth.tx_multicast,
2696 			   &nsd->eth.tx_multicast);
2697 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2698 			   I40E_GLPRT_BPRCL(hw->port),
2699 			   pf->stat_offsets_loaded,
2700 			   &osd->eth.rx_broadcast,
2701 			   &nsd->eth.rx_broadcast);
2702 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2703 			   I40E_GLPRT_BPTCL(hw->port),
2704 			   pf->stat_offsets_loaded,
2705 			   &osd->eth.tx_broadcast,
2706 			   &nsd->eth.tx_broadcast);
2707 
2708 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2709 			   pf->stat_offsets_loaded,
2710 			   &osd->tx_dropped_link_down,
2711 			   &nsd->tx_dropped_link_down);
2712 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2713 			   pf->stat_offsets_loaded,
2714 			   &osd->mac_local_faults,
2715 			   &nsd->mac_local_faults);
2716 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2717 			   pf->stat_offsets_loaded,
2718 			   &osd->mac_remote_faults,
2719 			   &nsd->mac_remote_faults);
2720 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2721 			   pf->stat_offsets_loaded,
2722 			   &osd->rx_length_errors,
2723 			   &nsd->rx_length_errors);
2724 
2725 	/* Flow control (LFC) stats */
2726 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2727 			   pf->stat_offsets_loaded,
2728 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2729 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2730 			   pf->stat_offsets_loaded,
2731 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2732 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2733 			   pf->stat_offsets_loaded,
2734 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2735 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2736 			   pf->stat_offsets_loaded,
2737 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2738 
2739 	/* Packet size stats rx */
2740 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2741 			   I40E_GLPRT_PRC64L(hw->port),
2742 			   pf->stat_offsets_loaded,
2743 			   &osd->rx_size_64, &nsd->rx_size_64);
2744 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2745 			   I40E_GLPRT_PRC127L(hw->port),
2746 			   pf->stat_offsets_loaded,
2747 			   &osd->rx_size_127, &nsd->rx_size_127);
2748 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2749 			   I40E_GLPRT_PRC255L(hw->port),
2750 			   pf->stat_offsets_loaded,
2751 			   &osd->rx_size_255, &nsd->rx_size_255);
2752 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2753 			   I40E_GLPRT_PRC511L(hw->port),
2754 			   pf->stat_offsets_loaded,
2755 			   &osd->rx_size_511, &nsd->rx_size_511);
2756 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2757 			   I40E_GLPRT_PRC1023L(hw->port),
2758 			   pf->stat_offsets_loaded,
2759 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2760 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2761 			   I40E_GLPRT_PRC1522L(hw->port),
2762 			   pf->stat_offsets_loaded,
2763 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2764 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2765 			   I40E_GLPRT_PRC9522L(hw->port),
2766 			   pf->stat_offsets_loaded,
2767 			   &osd->rx_size_big, &nsd->rx_size_big);
2768 
2769 	/* Packet size stats tx */
2770 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2771 			   I40E_GLPRT_PTC64L(hw->port),
2772 			   pf->stat_offsets_loaded,
2773 			   &osd->tx_size_64, &nsd->tx_size_64);
2774 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2775 			   I40E_GLPRT_PTC127L(hw->port),
2776 			   pf->stat_offsets_loaded,
2777 			   &osd->tx_size_127, &nsd->tx_size_127);
2778 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2779 			   I40E_GLPRT_PTC255L(hw->port),
2780 			   pf->stat_offsets_loaded,
2781 			   &osd->tx_size_255, &nsd->tx_size_255);
2782 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2783 			   I40E_GLPRT_PTC511L(hw->port),
2784 			   pf->stat_offsets_loaded,
2785 			   &osd->tx_size_511, &nsd->tx_size_511);
2786 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2787 			   I40E_GLPRT_PTC1023L(hw->port),
2788 			   pf->stat_offsets_loaded,
2789 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2790 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2791 			   I40E_GLPRT_PTC1522L(hw->port),
2792 			   pf->stat_offsets_loaded,
2793 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2794 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2795 			   I40E_GLPRT_PTC9522L(hw->port),
2796 			   pf->stat_offsets_loaded,
2797 			   &osd->tx_size_big, &nsd->tx_size_big);
2798 
2799 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2800 			   pf->stat_offsets_loaded,
2801 			   &osd->rx_undersize, &nsd->rx_undersize);
2802 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2803 			   pf->stat_offsets_loaded,
2804 			   &osd->rx_fragments, &nsd->rx_fragments);
2805 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2806 			   pf->stat_offsets_loaded,
2807 			   &osd->rx_oversize, &nsd->rx_oversize);
2808 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2809 			   pf->stat_offsets_loaded,
2810 			   &osd->rx_jabber, &nsd->rx_jabber);
2811 	pf->stat_offsets_loaded = true;
2812 	/* End hw stats */
2813 
2814 	/* Update vsi stats */
2815 	ixl_update_vsi_stats(vsi);
2816 
2817 	for (int i = 0; i < pf->num_vfs; i++) {
2818 		vf = &pf->vfs[i];
2819 		if (vf->vf_flags & VF_FLAG_ENABLED)
2820 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2821 	}
2822 }
2823 
2824 int
2825 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2826 {
2827 	struct i40e_hw *hw = &pf->hw;
2828 	device_t dev = pf->dev;
2829 	int error = 0;
2830 
2831 	error = i40e_shutdown_lan_hmc(hw);
2832 	if (error)
2833 		device_printf(dev,
2834 		    "Shutdown LAN HMC failed with code %d\n", error);
2835 
2836 	ixl_disable_intr0(hw);
2837 
2838 	error = i40e_shutdown_adminq(hw);
2839 	if (error)
2840 		device_printf(dev,
2841 		    "Shutdown Admin queue failed with code %d\n", error);
2842 
2843 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2844 	return (error);
2845 }
2846 
2847 int
2848 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
2849 {
2850 	struct i40e_hw *hw = &pf->hw;
2851 	struct ixl_vsi *vsi = &pf->vsi;
2852 	device_t dev = pf->dev;
2853 	int error = 0;
2854 
2855 	device_printf(dev, "Rebuilding driver state...\n");
2856 
2857 	error = i40e_pf_reset(hw);
2858 	if (error) {
2859 		device_printf(dev, "PF reset failure %s\n",
2860 		    i40e_stat_str(hw, error));
2861 		goto ixl_rebuild_hw_structs_after_reset_err;
2862 	}
2863 
2864 	/* Setup */
2865 	error = i40e_init_adminq(hw);
2866 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2867 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2868 		    error);
2869 		goto ixl_rebuild_hw_structs_after_reset_err;
2870 	}
2871 
2872 	i40e_clear_pxe_mode(hw);
2873 
2874 	error = ixl_get_hw_capabilities(pf);
2875 	if (error) {
2876 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2877 		goto ixl_rebuild_hw_structs_after_reset_err;
2878 	}
2879 
2880 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2881 	    hw->func_caps.num_rx_qp, 0, 0);
2882 	if (error) {
2883 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
2884 		goto ixl_rebuild_hw_structs_after_reset_err;
2885 	}
2886 
2887 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2888 	if (error) {
2889 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2890 		goto ixl_rebuild_hw_structs_after_reset_err;
2891 	}
2892 
2893 	/* reserve a contiguous allocation for the PF's VSI */
2894 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2895 	if (error) {
2896 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2897 		    error);
2898 		/* TODO: error handling */
2899 	}
2900 
2901 	error = ixl_switch_config(pf);
2902 	if (error) {
2903 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2904 		     error);
2905 		goto ixl_rebuild_hw_structs_after_reset_err;
2906 	}
2907 
2908 	/* Remove default filters reinstalled by FW on reset */
2909 	ixl_del_default_hw_filters(vsi);
2910 
2911 	/* Determine link state */
2912 	if (ixl_attach_get_link_status(pf)) {
2913 		error = EINVAL;
2914 		/* TODO: error handling */
2915 	}
2916 
2917 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2918 	ixl_get_fw_lldp_status(pf);
2919 
2920 	/* Keep admin queue interrupts active while driver is loaded */
2921 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2922 		ixl_configure_intr0_msix(pf);
2923 		ixl_enable_intr0(hw);
2924 	}
2925 
2926 	device_printf(dev, "Rebuilding driver state done.\n");
2927 	return (0);
2928 
2929 ixl_rebuild_hw_structs_after_reset_err:
2930 	device_printf(dev, "Reload the driver to recover\n");
2931 	return (error);
2932 }
2933 
2934 void
2935 ixl_handle_empr_reset(struct ixl_pf *pf)
2936 {
2937 	struct ixl_vsi	*vsi = &pf->vsi;
2938 	struct i40e_hw	*hw = &pf->hw;
2939 	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2940 	int count = 0;
2941 	u32 reg;
2942 
2943 	ixl_prepare_for_reset(pf, is_up);
2944 
2945 	/* Typically finishes within 3-4 seconds */
2946 	while (count++ < 100) {
2947 		reg = rd32(hw, I40E_GLGEN_RSTAT)
2948 			& I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2949 		if (reg)
2950 			i40e_msec_delay(100);
2951 		else
2952 			break;
2953 	}
2954 	ixl_dbg(pf, IXL_DBG_INFO,
2955 			"Reset wait count: %d\n", count);
2956 
2957 	ixl_rebuild_hw_structs_after_reset(pf, is_up);
2958 
2959 	atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2960 }
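
/*
 * Timing note (editorial): the wait loop above polls the DEVSTATE field
 * of I40E_GLGEN_RSTAT every 100 ms for at most 100 iterations, so the
 * driver waits up to roughly 10 seconds before rebuilding anyway; the
 * typical 3-4 second reset completes in 30-40 iterations.
 */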
2961 
2962 /**
2963  * Update VSI-specific ethernet statistics counters.
2964  **/
2965 void
2966 ixl_update_eth_stats(struct ixl_vsi *vsi)
2967 {
2968 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2969 	struct i40e_hw *hw = &pf->hw;
2970 	struct i40e_eth_stats *es;
2971 	struct i40e_eth_stats *oes;
2972 	struct i40e_hw_port_stats *nsd;
2973 	u16 stat_idx = vsi->info.stat_counter_idx;
2974 
2975 	es = &vsi->eth_stats;
2976 	oes = &vsi->eth_stats_offsets;
2977 	nsd = &pf->stats;
2978 
2979 	/* Gather up the stats that the hw collects */
2980 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2981 			   vsi->stat_offsets_loaded,
2982 			   &oes->tx_errors, &es->tx_errors);
2983 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2984 			   vsi->stat_offsets_loaded,
2985 			   &oes->rx_discards, &es->rx_discards);
2986 
2987 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2988 			   I40E_GLV_GORCL(stat_idx),
2989 			   vsi->stat_offsets_loaded,
2990 			   &oes->rx_bytes, &es->rx_bytes);
2991 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2992 			   I40E_GLV_UPRCL(stat_idx),
2993 			   vsi->stat_offsets_loaded,
2994 			   &oes->rx_unicast, &es->rx_unicast);
2995 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2996 			   I40E_GLV_MPRCL(stat_idx),
2997 			   vsi->stat_offsets_loaded,
2998 			   &oes->rx_multicast, &es->rx_multicast);
2999 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
3000 			   I40E_GLV_BPRCL(stat_idx),
3001 			   vsi->stat_offsets_loaded,
3002 			   &oes->rx_broadcast, &es->rx_broadcast);
3003 
3004 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
3005 			   I40E_GLV_GOTCL(stat_idx),
3006 			   vsi->stat_offsets_loaded,
3007 			   &oes->tx_bytes, &es->tx_bytes);
3008 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
3009 			   I40E_GLV_UPTCL(stat_idx),
3010 			   vsi->stat_offsets_loaded,
3011 			   &oes->tx_unicast, &es->tx_unicast);
3012 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
3013 			   I40E_GLV_MPTCL(stat_idx),
3014 			   vsi->stat_offsets_loaded,
3015 			   &oes->tx_multicast, &es->tx_multicast);
3016 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
3017 			   I40E_GLV_BPTCL(stat_idx),
3018 			   vsi->stat_offsets_loaded,
3019 			   &oes->tx_broadcast, &es->tx_broadcast);
3020 	vsi->stat_offsets_loaded = true;
3021 }
3022 
3023 void
3024 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3025 {
3026 	struct ixl_pf		*pf;
3027 	struct ifnet		*ifp;
3028 	struct i40e_eth_stats	*es;
3029 	u64			tx_discards;
3030 
3031 	struct i40e_hw_port_stats *nsd;
3032 
3033 	pf = vsi->back;
3034 	ifp = vsi->ifp;
3035 	es = &vsi->eth_stats;
3036 	nsd = &pf->stats;
3037 
3038 	ixl_update_eth_stats(vsi);
3039 
3040 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3041 
3042 	/* Update ifnet stats */
3043 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
3044 	                   es->rx_multicast +
3045 			   es->rx_broadcast);
3046 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
3047 	                   es->tx_multicast +
3048 			   es->tx_broadcast);
3049 	IXL_SET_IBYTES(vsi, es->rx_bytes);
3050 	IXL_SET_OBYTES(vsi, es->tx_bytes);
3051 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
3052 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
3053 
3054 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3055 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3056 	    nsd->rx_jabber);
3057 	IXL_SET_OERRORS(vsi, es->tx_errors);
3058 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3059 	IXL_SET_OQDROPS(vsi, tx_discards);
3060 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3061 	IXL_SET_COLLISIONS(vsi, 0);
3062 }
3063 
3064 /**
3065  * Reset all of the stats for the given pf
3066  **/
3067 void
3068 ixl_pf_reset_stats(struct ixl_pf *pf)
3069 {
3070 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3071 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3072 	pf->stat_offsets_loaded = false;
3073 }
3074 
3075 /**
3076  * Resets all stats of the given vsi
3077  **/
3078 void
3079 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3080 {
3081 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3082 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3083 	vsi->stat_offsets_loaded = false;
3084 }
3085 
3086 /**
3087  * Read and update a 48 bit stat from the hw
3088  *
3089  * Since the device stats are not reset on a PF reset, they likely will not
3090  * be zeroed when the driver starts.  We'll save the first values read
3091  * and use them as offsets to be subtracted from the raw values in order
3092  * to report stats that count from zero.
3093  **/
3094 void
3095 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3096 	bool offset_loaded, u64 *offset, u64 *stat)
3097 {
3098 	u64 new_data;
3099 
3100 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3101 	new_data = rd64(hw, loreg);
3102 #else
3103 	/*
3104 	 * Use two rd32's instead of one rd64; FreeBSD versions before
3105 	 * 10 don't support 64-bit bus reads/writes.
3106 	 */
3107 	new_data = rd32(hw, loreg);
3108 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3109 #endif
3110 
3111 	if (!offset_loaded)
3112 		*offset = new_data;
3113 	if (new_data >= *offset)
3114 		*stat = new_data - *offset;
3115 	else
3116 		*stat = (new_data + ((u64)1 << 48)) - *offset;
3117 	*stat &= 0xFFFFFFFFFFFFULL;
3118 }
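
/*
 * Worked example (editorial): the hardware counter is only 48 bits
 * wide, so the subtraction must handle wraparound.  If the saved offset
 * is 0xFFFFFFFFFFF0 and the register now reads 0x10:
 *
 *	new_data (0x10) < *offset, so
 *	*stat = (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20
 *
 * i.e. 32 events occurred across the wrap, and the final mask keeps the
 * result within 48 bits.
 */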
3119 
3120 /**
3121  * Read and update a 32 bit stat from the hw
3122  **/
3123 void
3124 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3125 	bool offset_loaded, u64 *offset, u64 *stat)
3126 {
3127 	u32 new_data;
3128 
3129 	new_data = rd32(hw, reg);
3130 	if (!offset_loaded)
3131 		*offset = new_data;
3132 	if (new_data >= *offset)
3133 		*stat = (u32)(new_data - *offset);
3134 	else
3135 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3136 }
3137 
3138 void
3139 ixl_add_device_sysctls(struct ixl_pf *pf)
3140 {
3141 	device_t dev = pf->dev;
3142 	struct i40e_hw *hw = &pf->hw;
3143 
3144 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3145 	struct sysctl_oid_list *ctx_list =
3146 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3147 
3148 	struct sysctl_oid *debug_node;
3149 	struct sysctl_oid_list *debug_list;
3150 
3151 	struct sysctl_oid *fec_node;
3152 	struct sysctl_oid_list *fec_list;
3153 
3154 	/* Set up sysctls */
3155 	SYSCTL_ADD_PROC(ctx, ctx_list,
3156 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3157 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3158 
3159 	SYSCTL_ADD_PROC(ctx, ctx_list,
3160 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3161 	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3162 
3163 	SYSCTL_ADD_PROC(ctx, ctx_list,
3164 	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3165 	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3166 
3167 	SYSCTL_ADD_PROC(ctx, ctx_list,
3168 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3169 	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3170 
3171 	SYSCTL_ADD_PROC(ctx, ctx_list,
3172 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3173 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3174 
3175 	SYSCTL_ADD_PROC(ctx, ctx_list,
3176 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3177 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
3178 	    "Queues not allocated to a PF or VF");
3179 
3180 	SYSCTL_ADD_PROC(ctx, ctx_list,
3181 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3182 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
3183 	    "Immediately set TX ITR value for all queues");
3184 
3185 	SYSCTL_ADD_PROC(ctx, ctx_list,
3186 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3187 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
3188 	    "Immediately set RX ITR value for all queues");
3189 
3190 	SYSCTL_ADD_INT(ctx, ctx_list,
3191 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3192 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3193 
3194 	SYSCTL_ADD_INT(ctx, ctx_list,
3195 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3196 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3197 
3198 	/* Add FEC sysctls for 25G adapters */
3199 	if (i40e_is_25G_device(hw->device_id)) {
3200 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3201 		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3202 		fec_list = SYSCTL_CHILDREN(fec_node);
3203 
3204 		SYSCTL_ADD_PROC(ctx, fec_list,
3205 		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3206 		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3207 
3208 		SYSCTL_ADD_PROC(ctx, fec_list,
3209 		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3210 		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3211 
3212 		SYSCTL_ADD_PROC(ctx, fec_list,
3213 		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3214 		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3215 
3216 		SYSCTL_ADD_PROC(ctx, fec_list,
3217 		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3218 		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3219 
3220 		SYSCTL_ADD_PROC(ctx, fec_list,
3221 		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3222 		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3223 	}
3224 
3225 	SYSCTL_ADD_PROC(ctx, ctx_list,
3226 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3227 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3228 
3229 	/* Add sysctls meant to print debug information, but don't list them
3230 	 * in "sysctl -a" output. */
3231 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3232 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3233 	debug_list = SYSCTL_CHILDREN(debug_node);
3234 
3235 	SYSCTL_ADD_UINT(ctx, debug_list,
3236 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3237 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
3238 
3239 	SYSCTL_ADD_UINT(ctx, debug_list,
3240 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3241 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
3242 
3243 	SYSCTL_ADD_PROC(ctx, debug_list,
3244 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3245 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3246 
3247 	SYSCTL_ADD_PROC(ctx, debug_list,
3248 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3249 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3250 
3251 	SYSCTL_ADD_PROC(ctx, debug_list,
3252 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3253 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3254 
3255 	SYSCTL_ADD_PROC(ctx, debug_list,
3256 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3257 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3258 
3259 	SYSCTL_ADD_PROC(ctx, debug_list,
3260 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3261 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3262 
3263 	SYSCTL_ADD_PROC(ctx, debug_list,
3264 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3265 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3266 
3267 	SYSCTL_ADD_PROC(ctx, debug_list,
3268 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3269 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3270 
3271 	SYSCTL_ADD_PROC(ctx, debug_list,
3272 	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3273 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3274 
3275 	SYSCTL_ADD_PROC(ctx, debug_list,
3276 	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3277 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3278 
3279 	SYSCTL_ADD_PROC(ctx, debug_list,
3280 	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3281 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3282 
3283 	SYSCTL_ADD_PROC(ctx, debug_list,
3284 	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3285 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3286 
3287 	SYSCTL_ADD_PROC(ctx, debug_list,
3288 	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3289 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3290 
3291 	SYSCTL_ADD_PROC(ctx, debug_list,
3292 	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3293 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3294 
3295 	SYSCTL_ADD_PROC(ctx, debug_list,
3296 	    OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3297 	    pf, 0, ixl_sysctl_do_emp_reset, "I",
3298 	    "(This doesn't work) Tell HW to initiate an EMP (entire firmware) reset");
3299 
3300 	SYSCTL_ADD_PROC(ctx, debug_list,
3301 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3302 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3303 
3304 	if (pf->has_i2c) {
3305 		SYSCTL_ADD_PROC(ctx, debug_list,
3306 		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3307 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3308 
3309 		SYSCTL_ADD_PROC(ctx, debug_list,
3310 		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3311 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3312 
3313 		SYSCTL_ADD_PROC(ctx, debug_list,
3314 		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3315 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3316 	}
3317 
3318 #ifdef PCI_IOV
3319 	SYSCTL_ADD_UINT(ctx, debug_list,
3320 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
3321 	    0, "PF/VF Virtual Channel debug level");
3322 #endif
3323 }
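
/*
 * Resulting tree (illustrative; names taken from the calls above, unit
 * number assumed to be 0):
 *
 *	dev.ixl.0.fc			flow control mode (0-3)
 *	dev.ixl.0.advertise_speed	advertised link speeds
 *	dev.ixl.0.fec.*			FEC knobs (25G devices only)
 *	dev.ixl.0.debug.*		debug nodes, hidden by CTLFLAG_SKIP
 *
 * e.g. `sysctl dev.ixl.0.debug.link_status` still works even though the
 * "debug" subtree is omitted from `sysctl -a` output.
 */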
3324 
3325 /*
3326  * Primarily for finding out, at runtime, how many queues can be
3327  * assigned to VFs.
3328  */
3329 static int
3330 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3331 {
3332 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3333 	int queues;
3334 
3336 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3338 
3339 	return (sysctl_handle_int(oidp, NULL, queues, req));
3340 }
3341 
3342 /*
3343 ** Set flow control using sysctl:
3344 ** 	0 - off
3345 **	1 - rx pause
3346 **	2 - tx pause
3347 **	3 - full
3348 */
3349 int
3350 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3351 {
3352 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3353 	struct i40e_hw *hw = &pf->hw;
3354 	device_t dev = pf->dev;
3355 	int requested_fc, error = 0;
3356 	enum i40e_status_code aq_error = 0;
3357 	u8 fc_aq_err = 0;
3358 
3359 	/* Get request */
3360 	requested_fc = pf->fc;
3361 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3362 	if ((error) || (req->newptr == NULL))
3363 		return (error);
3364 	if (requested_fc < 0 || requested_fc > 3) {
3365 		device_printf(dev,
3366 		    "Invalid fc mode; valid modes are 0 through 3\n");
3367 		return (EINVAL);
3368 	}
3369 
3370 	/* Set fc ability for port */
3371 	hw->fc.requested_mode = requested_fc;
3372 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3373 	if (aq_error) {
3374 		device_printf(dev,
3375 		    "%s: Error setting new fc mode %d; fc_err %#x\n",
3376 		    __func__, aq_error, fc_aq_err);
3377 		return (EIO);
3378 	}
3379 	pf->fc = requested_fc;
3380 
3381 	return (0);
3382 }
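
/*
 * Usage sketch (assumes an ixl device at unit 0): setting full flow
 * control from userland and reading it back:
 *
 *	# sysctl dev.ixl.0.fc=3
 *	# sysctl dev.ixl.0.fc
 *
 * The handler range-checks the request, pushes the mode to firmware
 * through i40e_set_fc(), and caches the new value in pf->fc only on
 * success.
 */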
3383 
3384 char *
3385 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3386 {
3387 	int index;
3388 
3389 	char *speeds[] = {
3390 		"Unknown",
3391 		"100 Mbps",
3392 		"1 Gbps",
3393 		"10 Gbps",
3394 		"40 Gbps",
3395 		"20 Gbps",
3396 		"25 Gbps",
3397 	};
3398 
3399 	switch (link_speed) {
3400 	case I40E_LINK_SPEED_100MB:
3401 		index = 1;
3402 		break;
3403 	case I40E_LINK_SPEED_1GB:
3404 		index = 2;
3405 		break;
3406 	case I40E_LINK_SPEED_10GB:
3407 		index = 3;
3408 		break;
3409 	case I40E_LINK_SPEED_40GB:
3410 		index = 4;
3411 		break;
3412 	case I40E_LINK_SPEED_20GB:
3413 		index = 5;
3414 		break;
3415 	case I40E_LINK_SPEED_25GB:
3416 		index = 6;
3417 		break;
3418 	case I40E_LINK_SPEED_UNKNOWN:
3419 	default:
3420 		index = 0;
3421 		break;
3422 	}
3423 
3424 	return speeds[index];
3425 }
3426 
3427 int
3428 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3429 {
3430 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3431 	struct i40e_hw *hw = &pf->hw;
3432 	int error = 0;
3433 
3434 	ixl_update_link_status(pf);
3435 
3436 	error = sysctl_handle_string(oidp,
3437 	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3438 	    8, req);
3439 	return (error);
3440 }
3441 
3442 /*
3443  * Converts an 8-bit speed bitmap between the sysctl flag encoding
3444  * and the Admin Queue flag encoding; to_aq selects the direction.
3445  */
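/*
 * Worked example of the mapping below: the 10G entry packs the AQ flag
 * in its low byte and the sysctl flag in its high byte, i.e.
 * (I40E_LINK_SPEED_10GB | (0x4 << 8)). Converting sysctl flags 0x6
 * (1G | 10G) with to_aq == true therefore yields
 * (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB), and the reverse
 * direction recovers 0x6 from those AQ flags.
 */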
3446 static u8
3447 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3448 {
3449 	static u16 speedmap[6] = {
3450 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
3451 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
3452 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
3453 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
3454 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
3455 		(I40E_LINK_SPEED_40GB  | (0x20 << 8))
3456 	};
3457 	u8 retval = 0;
3458 
3459 	for (int i = 0; i < 6; i++) {
3460 		if (to_aq)
3461 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3462 		else
3463 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3464 	}
3465 
3466 	return (retval);
3467 }
3468 
3469 int
3470 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3471 {
3472 	struct i40e_hw *hw = &pf->hw;
3473 	device_t dev = pf->dev;
3474 	struct i40e_aq_get_phy_abilities_resp abilities;
3475 	struct i40e_aq_set_phy_config config;
3476 	enum i40e_status_code aq_error = 0;
3477 
3478 	/* Get current capability information */
3479 	aq_error = i40e_aq_get_phy_capabilities(hw,
3480 	    FALSE, FALSE, &abilities, NULL);
3481 	if (aq_error) {
3482 		device_printf(dev,
3483 		    "%s: Error getting phy capabilities %d,"
3484 		    " aq error: %d\n", __func__, aq_error,
3485 		    hw->aq.asq_last_status);
3486 		return (EIO);
3487 	}
3488 
3489 	/* Prepare new config */
3490 	bzero(&config, sizeof(config));
3491 	if (from_aq)
3492 		config.link_speed = speeds;
3493 	else
3494 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3495 	config.phy_type = abilities.phy_type;
3496 	config.phy_type_ext = abilities.phy_type_ext;
3497 	config.abilities = abilities.abilities
3498 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3499 	config.eee_capability = abilities.eee_capability;
3500 	config.eeer = abilities.eeer_val;
3501 	config.low_power_ctrl = abilities.d3_lpan;
3502 	config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3503 
3504 	/* Do aq command & restart link */
3505 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3506 	if (aq_error) {
3507 		device_printf(dev,
3508 		    "%s: Error setting new phy config %d,"
3509 		    " aq error: %d\n", __func__, aq_error,
3510 		    hw->aq.asq_last_status);
3511 		return (EIO);
3512 	}
3513 
3514 	return (0);
3515 }
3516 
3517 /*
3518 ** Supported link speeds:
3519 **	Flags:
3520 **	 0x1 - 100 Mb
3521 **	 0x2 - 1G
3522 **	 0x4 - 10G
3523 **	 0x8 - 20G
3524 **	0x10 - 25G
3525 **	0x20 - 40G
3526 */
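/*
** For example, a port whose PHY supports 1G and 10G reports
** 0x2 | 0x4 == 0x6 from this sysctl.
*/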
3527 static int
3528 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3529 {
3530 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3531 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3532 
3533 	return sysctl_handle_int(oidp, NULL, supported, req);
3534 }
3535 
3536 /*
3537 ** Control link advertise speed:
3538 **	Flags:
3539 **	 0x1 - advertise 100 Mb
3540 **	 0x2 - advertise 1G
3541 **	 0x4 - advertise 10G
3542 **	 0x8 - advertise 20G
3543 **	0x10 - advertise 25G
3544 **	0x20 - advertise 40G
3545 **
3546 **	Set to 0 to disable link
3547 */
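/*
** Example usage (the OID name is an assumption; registration happens
** elsewhere in this file):
**	sysctl dev.ixl.0.advertise_speed=0x6	(advertise 1G and 10G)
**	sysctl dev.ixl.0.advertise_speed=0	(disable link)
*/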
3548 int
3549 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3550 {
3551 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3552 	device_t dev = pf->dev;
3553 	u8 converted_speeds;
3554 	int requested_ls = 0;
3555 	int error = 0;
3556 
3557 	/* Read in new mode */
3558 	requested_ls = pf->advertised_speed;
3559 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3560 	if ((error) || (req->newptr == NULL))
3561 		return (error);
3562 
3563 	/* Error out if bits outside of possible flag range are set */
3564 	if ((requested_ls & ~((u8)0x3F)) != 0) {
3565 		device_printf(dev, "Input advertised speed out of range; "
3566 		    "valid flags are: 0x%02x\n",
3567 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3568 		return (EINVAL);
3569 	}
3570 
3571 	/* Check if adapter supports input value */
3572 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3573 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3574 		device_printf(dev, "Invalid advertised speed; "
3575 		    "valid flags are: 0x%02x\n",
3576 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3577 		return (EINVAL);
3578 	}
3579 
3580 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3581 	if (error)
3582 		return (error);
3583 
3584 	pf->advertised_speed = requested_ls;
3585 	ixl_update_link_status(pf);
3586 	return (0);
3587 }
3588 
3589 /*
3590 ** Get the width and transaction speed of
3591 ** the bus this adapter is plugged into.
3592 */
3593 void
3594 ixl_get_bus_info(struct ixl_pf *pf)
3595 {
3596 	struct i40e_hw *hw = &pf->hw;
3597 	device_t dev = pf->dev;
3598 	u16 link;
3599 	u32 offset, num_ports;
3600 	u64 max_speed;
3601 
3602 	/* Some devices don't use PCIE */
3603 	if (hw->mac.type == I40E_MAC_X722)
3604 		return;
3605 
3606 	/* Read PCI Express Capabilities Link Status Register */
3607 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3608 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3609 
3610 	/* Fill out hw struct with PCIE info */
3611 	i40e_set_pci_config_data(hw, link);
3612 
3613 	/* Use info to print out bandwidth messages */
3614 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
3615 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3616 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3617 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3618 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3619 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3620 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3621 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3622 	    ("Unknown"));
3623 
3624 	/*
3625 	 * If adapter is in slot with maximum supported speed,
3626 	 * no warning message needs to be printed out.
3627 	 */
3628 	if (hw->bus.speed >= i40e_bus_speed_8000
3629 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3630 		return;
3631 
3632 	num_ports = bitcount32(hw->func_caps.valid_functions);
3633 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3634 
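	/*
	 * Rough heuristic: e.g. two 40G ports want ~80000 Mb/s, while a
	 * PCIe 3.0 x8 slot provides 8000 MT/s * 8 lanes = 64000, which
	 * would trigger the warning below.
	 */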
3635 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3636 		device_printf(dev, "PCI-Express bandwidth available"
3637 		    " for this device may be insufficient for"
3638 		    " optimal performance.\n");
3639 		device_printf(dev, "Please move the device to a different"
3640 		    " PCI-e link with more lanes and/or higher"
3641 		    " transfer rate.\n");
3642 	}
3643 }
3644 
3645 static int
3646 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3647 {
3648 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3649 	struct i40e_hw	*hw = &pf->hw;
3650 	struct sbuf	*sbuf;
3651 
3652 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3653 	ixl_nvm_version_str(hw, sbuf);
3654 	sbuf_finish(sbuf);
3655 	sbuf_delete(sbuf);
3656 
3657 	return (0);
3658 }
3659 
3660 void
3661 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3662 {
3663 	if ((nvma->command == I40E_NVM_READ) &&
3664 	    ((nvma->config & 0xFF) == 0xF) &&
3665 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3666 	    (nvma->offset == 0) &&
3667 	    (nvma->data_size == 1)) {
3668 		/* "Get Driver Status" polling command; intentionally not logged */
3669 	}
3670 	else if (nvma->command == I40E_NVM_READ) {
3671 		/* Ordinary NVM read; intentionally not logged */
3672 	}
3673 	else {
3674 		switch (nvma->command) {
3675 		case 0xB:
3676 			device_printf(dev, "- command: I40E_NVM_READ\n");
3677 			break;
3678 		case 0xC:
3679 			device_printf(dev, "- command: I40E_NVM_WRITE\n");
3680 			break;
3681 		default:
3682 			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
3683 			break;
3684 		}
3685 
3686 		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3687 		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3688 		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3689 		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
3690 	}
3691 }
3692 
3693 int
3694 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3695 {
3696 	struct i40e_hw *hw = &pf->hw;
3697 	struct i40e_nvm_access *nvma;
3698 	device_t dev = pf->dev;
3699 	enum i40e_status_code status = 0;
3700 	int perrno;
3701 
3702 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3703 
3704 	/* Sanity checks */
3705 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
3706 	    ifd->ifd_data == NULL) {
3707 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3708 		    __func__);
3709 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3710 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
3711 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3712 		    ifd->ifd_data);
3713 		return (EINVAL);
3714 	}
3715 
3716 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
3717 
3718 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3719 		ixl_print_nvm_cmd(dev, nvma);
3720 
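	/*
	 * If an adapter reset is in progress, wait up to ten seconds
	 * (100 polls of 100ms each) for it to finish before issuing
	 * the NVM command.
	 */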
3721 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3722 		int count = 0;
3723 		while (count++ < 100) {
3724 			i40e_msec_delay(100);
3725 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3726 				break;
3727 		}
3728 	}
3729 
3730 	if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) {
3731 		// TODO: Might need a different lock here
3732 		// IXL_PF_LOCK(pf);
3733 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3734 		// IXL_PF_UNLOCK(pf);
3735 	} else {
3736 		perrno = -EBUSY;
3737 	}
3738 
3739 	/* Let nvmupdate report errors itself; only log them here when NVMUPD debugging is enabled */
3740 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3741 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3742 		    i40e_stat_str(hw, status), perrno);
3743 
3744 	/*
3745 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
3746 	 * to run this ioctl again, so return -EACCES in place of -EPERM.
3747 	 */
3748 	if (perrno == -EPERM)
3749 		return (-EACCES);
3750 	else
3751 		return (perrno);
3752 }
3753 
3754 int
3755 ixl_find_i2c_interface(struct ixl_pf *pf)
3756 {
3757 	struct i40e_hw *hw = &pf->hw;
3758 	bool i2c_en, port_matched;
3759 	u32 reg;
3760 
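	/*
	 * Check each of the four GLGEN_MDIO_I2C_SEL registers: each one
	 * reports whether that MDIO interface is in I2C mode and a bitmap
	 * of the PHY ports it serves. Return the first interface that is
	 * in I2C mode and includes this function's port.
	 */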
3761 	for (int i = 0; i < 4; i++) {
3762 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3763 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3764 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3765 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3766 		    & BIT(hw->port);
3767 		if (i2c_en && port_matched)
3768 			return (i);
3769 	}
3770 
3771 	return (-1);
3772 }
3773 
3774 static char *
3775 ixl_phy_type_string(u32 bit_pos, bool ext)
3776 {
3777 	static char * phy_types_str[32] = {
3778 		"SGMII",
3779 		"1000BASE-KX",
3780 		"10GBASE-KX4",
3781 		"10GBASE-KR",
3782 		"40GBASE-KR4",
3783 		"XAUI",
3784 		"XFI",
3785 		"SFI",
3786 		"XLAUI",
3787 		"XLPPI",
3788 		"40GBASE-CR4",
3789 		"10GBASE-CR1",
3790 		"SFP+ Active DA",
3791 		"QSFP+ Active DA",
3792 		"Reserved (14)",
3793 		"Reserved (15)",
3794 		"Reserved (16)",
3795 		"100BASE-TX",
3796 		"1000BASE-T",
3797 		"10GBASE-T",
3798 		"10GBASE-SR",
3799 		"10GBASE-LR",
3800 		"10GBASE-SFP+Cu",
3801 		"10GBASE-CR1",
3802 		"40GBASE-CR4",
3803 		"40GBASE-SR4",
3804 		"40GBASE-LR4",
3805 		"1000BASE-SX",
3806 		"1000BASE-LX",
3807 		"1000BASE-T Optical",
3808 		"20GBASE-KR2",
3809 		"Reserved (31)"
3810 	};
3811 	static char * ext_phy_types_str[8] = {
3812 		"25GBASE-KR",
3813 		"25GBASE-CR",
3814 		"25GBASE-SR",
3815 		"25GBASE-LR",
3816 		"25GBASE-AOC",
3817 		"25GBASE-ACC",
3818 		"Reserved (6)",
3819 		"Reserved (7)"
3820 	};
3821 
3822 	if (ext && bit_pos > 7) return "Invalid_Ext";
3823 	if (bit_pos > 31) return "Invalid";
3824 
3825 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3826 }
3827 
3828 /* TODO: ERJ: I don't think this is necessary anymore. */
3829 int
3830 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3831 {
3832 	device_t dev = pf->dev;
3833 	struct i40e_hw *hw = &pf->hw;
3834 	struct i40e_aq_desc desc;
3835 	enum i40e_status_code status;
3836 
3837 	struct i40e_aqc_get_link_status *aq_link_status =
3838 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3839 
3840 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3841 	aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);	/* flag goes in the AQ descriptor, not the caller's copy */
3842 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3843 	if (status) {
3844 		device_printf(dev,
3845 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3846 		    __func__, i40e_stat_str(hw, status),
3847 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3848 		return (EIO);
3849 	}
3850 
3851 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3852 	return (0);
3853 }
3854 
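/*
 * Link-status PHY type values at or above 0x1F index the extended
 * (25G) PHY type table; rebase them to bit position zero first.
 */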
3855 static char *
3856 ixl_phy_type_string_ls(u8 val)
3857 {
3858 	if (val >= 0x1F)
3859 		return ixl_phy_type_string(val - 0x1F, true);
3860 	else
3861 		return ixl_phy_type_string(val, false);
3862 }
3863 
3864 static int
3865 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3866 {
3867 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3868 	device_t dev = pf->dev;
3869 	struct sbuf *buf;
3870 	int error = 0;
3871 
3872 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3873 	if (!buf) {
3874 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3875 		return (ENOMEM);
3876 	}
3877 
3878 	struct i40e_aqc_get_link_status link_status;
3879 	error = ixl_aq_get_link_status(pf, &link_status);
3880 	if (error) {
3881 		sbuf_delete(buf);
3882 		return (error);
3883 	}
3884 
3885 	sbuf_printf(buf, "\n"
3886 	    "PHY Type : 0x%02x<%s>\n"
3887 	    "Speed    : 0x%02x\n"
3888 	    "Link info: 0x%02x\n"
3889 	    "AN info  : 0x%02x\n"
3890 	    "Ext info : 0x%02x\n"
3891 	    "Loopback : 0x%02x\n"
3892 	    "Max Frame: %d\n"
3893 	    "Config   : 0x%02x\n"
3894 	    "Power    : 0x%02x",
3895 	    link_status.phy_type,
3896 	    ixl_phy_type_string_ls(link_status.phy_type),
3897 	    link_status.link_speed,
3898 	    link_status.link_info,
3899 	    link_status.an_info,
3900 	    link_status.ext_info,
3901 	    link_status.loopback,
3902 	    link_status.max_frame_size,
3903 	    link_status.config,
3904 	    link_status.power_desc);
3905 
3906 	error = sbuf_finish(buf);
3907 	if (error)
3908 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3909 
3910 	sbuf_delete(buf);
3911 	return (error);
3912 }
3913 
3914 static int
3915 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3916 {
3917 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3918 	struct i40e_hw *hw = &pf->hw;
3919 	device_t dev = pf->dev;
3920 	enum i40e_status_code status;
3921 	struct i40e_aq_get_phy_abilities_resp abilities;
3922 	struct sbuf *buf;
3923 	int error = 0;
3924 
3925 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3926 	if (!buf) {
3927 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3928 		return (ENOMEM);
3929 	}
3930 
3931 	status = i40e_aq_get_phy_capabilities(hw,
3932 	    FALSE, FALSE, &abilities, NULL);
3933 	if (status) {
3934 		device_printf(dev,
3935 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3936 		    __func__, i40e_stat_str(hw, status),
3937 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3938 		sbuf_delete(buf);
3939 		return (EIO);
3940 	}
3941 
3942 	sbuf_printf(buf, "\n"
3943 	    "PHY Type : %08x",
3944 	    abilities.phy_type);
3945 
3946 	if (abilities.phy_type != 0) {
3947 		sbuf_printf(buf, "<");
3948 		for (int i = 0; i < 32; i++)
3949 			if ((1 << i) & abilities.phy_type)
3950 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3951 		sbuf_printf(buf, ">\n");
3952 	}
3953 
3954 	sbuf_printf(buf, "PHY Ext  : %02x",
3955 	    abilities.phy_type_ext);
3956 
3957 	if (abilities.phy_type_ext != 0) {
3958 		sbuf_printf(buf, "<");
3959 		for (int i = 0; i < 4; i++)
3960 			if ((1 << i) & abilities.phy_type_ext)
3961 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3962 		sbuf_printf(buf, ">");
3963 	}
3964 	sbuf_printf(buf, "\n");
3965 
3966 	sbuf_printf(buf,
3967 	    "Speed    : %02x\n"
3968 	    "Abilities: %02x\n"
3969 	    "EEE cap  : %04x\n"
3970 	    "EEER reg : %08x\n"
3971 	    "D3 Lpan  : %02x\n"
3972 	    "ID       : %02x %02x %02x %02x\n"
3973 	    "ModType  : %02x %02x %02x\n"
3974 	    "ModType E: %01x\n"
3975 	    "FEC Cfg  : %02x\n"
3976 	    "Ext CC   : %02x",
3977 	    abilities.link_speed,
3978 	    abilities.abilities, abilities.eee_capability,
3979 	    abilities.eeer_val, abilities.d3_lpan,
3980 	    abilities.phy_id[0], abilities.phy_id[1],
3981 	    abilities.phy_id[2], abilities.phy_id[3],
3982 	    abilities.module_type[0], abilities.module_type[1],
3983 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3984 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3985 	    abilities.ext_comp_code);
3986 
3987 	error = sbuf_finish(buf);
3988 	if (error)
3989 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3990 
3991 	sbuf_delete(buf);
3992 	return (error);
3993 }
3994 
3995 static int
3996 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3997 {
3998 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3999 	struct ixl_vsi *vsi = &pf->vsi;
4000 	struct ixl_mac_filter *f;
4001 	char *buf, *buf_i;
4002 
4003 	int error = 0;
4004 	int ftl_len = 0;
4005 	int ftl_counter = 0;
4006 	int buf_len = 0;
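	/* 42 == strlen of one entry: 17 (MAC) + 11 (", vlan %4d") + 14 (", flags %#06x") */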
4007 	int entry_len = 42;
4008 
4009 	SLIST_FOREACH(f, &vsi->ftl, next) {
4010 		ftl_len++;
4011 	}
4012 
4013 	if (ftl_len < 1) {
4014 		sysctl_handle_string(oidp, "(none)", 6, req);
4015 		return (0);
4016 	}
4017 
4018 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4019 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_WAITOK);
4020 
4021 	sprintf(buf_i++, "\n");
4022 	SLIST_FOREACH(f, &vsi->ftl, next) {
4023 		sprintf(buf_i,
4024 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4025 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4026 		buf_i += entry_len;
4027 		/* don't print '\n' for last entry */
4028 		if (++ftl_counter != ftl_len) {
4029 			sprintf(buf_i, "\n");
4030 			buf_i++;
4031 		}
4032 	}
4033 
4034 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4035 	if (error)
4036 		printf("sysctl error: %d\n", error);
4037 	free(buf, M_DEVBUF);
4038 	return error;
4039 }
4040 
4041 #define IXL_SW_RES_SIZE 0x14
4042 int
4043 ixl_res_alloc_cmp(const void *a, const void *b)
4044 {
4045 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4046 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4047 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4048 
4049 	return ((int)one->resource_type - (int)two->resource_type);
4050 }
4051 
4052 /*
4053  * Longest string length: 25
4054  */
4055 char *
4056 ixl_switch_res_type_string(u8 type)
4057 {
4058 	// TODO: This should be made const as well
4059 	static char * ixl_switch_res_type_strings[0x14] = {
4060 		"VEB",
4061 		"VSI",
4062 		"Perfect Match MAC address",
4063 		"S-tag",
4064 		"(Reserved)",
4065 		"Multicast hash entry",
4066 		"Unicast hash entry",
4067 		"VLAN",
4068 		"VSI List entry",
4069 		"(Reserved)",
4070 		"VLAN Statistic Pool",
4071 		"Mirror Rule",
4072 		"Queue Set",
4073 		"Inner VLAN Forward filter",
4074 		"(Reserved)",
4075 		"Inner MAC",
4076 		"IP",
4077 		"GRE/VN1 Key",
4078 		"VN2 Key",
4079 		"Tunneling Port"
4080 	};
4081 
4082 	if (type < 0x14)
4083 		return ixl_switch_res_type_strings[type];
4084 	else
4085 		return "(Reserved)";
4086 }
4087 
4088 static int
4089 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4090 {
4091 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4092 	struct i40e_hw *hw = &pf->hw;
4093 	device_t dev = pf->dev;
4094 	struct sbuf *buf;
4095 	enum i40e_status_code status;
4096 	int error = 0;
4097 
4098 	u8 num_entries;
4099 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4100 
4101 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4102 	if (!buf) {
4103 		device_printf(dev, "Could not allocate sbuf for output.\n");
4104 		return (ENOMEM);
4105 	}
4106 
4107 	bzero(resp, sizeof(resp));
4108 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4109 				resp,
4110 				IXL_SW_RES_SIZE,
4111 				NULL);
4112 	if (status) {
4113 		device_printf(dev,
4114 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4115 		    __func__, i40e_stat_str(hw, status),
4116 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4117 		sbuf_delete(buf);
4118 		return (EIO);
4119 	}
4120 
4121 	/* Sort entries by type for display */
4122 	qsort(resp, num_entries,
4123 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4124 	    &ixl_res_alloc_cmp);
4125 
4126 	sbuf_cat(buf, "\n");
4127 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4128 	sbuf_printf(buf,
4129 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4130 	    "                          | (this)     | (all) | (this) | (all)       \n");
4131 	for (int i = 0; i < num_entries; i++) {
4132 		sbuf_printf(buf,
4133 		    "%25s | %10d   %5d   %6d   %12d",
4134 		    ixl_switch_res_type_string(resp[i].resource_type),
4135 		    resp[i].guaranteed,
4136 		    resp[i].total,
4137 		    resp[i].used,
4138 		    resp[i].total_unalloced);
4139 		if (i < num_entries - 1)
4140 			sbuf_cat(buf, "\n");
4141 	}
4142 
4143 	error = sbuf_finish(buf);
4144 	if (error)
4145 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4146 
4147 	sbuf_delete(buf);
4148 	return (error);
4149 }
4150 
4151 /*
4152 ** Caller must init and delete sbuf; this function will clear and
4153 ** finish it for caller.
4154 */
4155 char *
4156 ixl_switch_element_string(struct sbuf *s,
4157     struct i40e_aqc_switch_config_element_resp *element)
4158 {
4159 	sbuf_clear(s);
4160 
4161 	switch (element->element_type) {
4162 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4163 		sbuf_printf(s, "MAC %3d", element->element_info);
4164 		break;
4165 	case I40E_AQ_SW_ELEM_TYPE_PF:
4166 		sbuf_printf(s, "PF  %3d", element->element_info);
4167 		break;
4168 	case I40E_AQ_SW_ELEM_TYPE_VF:
4169 		sbuf_printf(s, "VF  %3d", element->element_info);
4170 		break;
4171 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4172 		sbuf_cat(s, "EMP");
4173 		break;
4174 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4175 		sbuf_cat(s, "BMC");
4176 		break;
4177 	case I40E_AQ_SW_ELEM_TYPE_PV:
4178 		sbuf_cat(s, "PV");
4179 		break;
4180 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4181 		sbuf_cat(s, "VEB");
4182 		break;
4183 	case I40E_AQ_SW_ELEM_TYPE_PA:
4184 		sbuf_cat(s, "PA");
4185 		break;
4186 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4187 		sbuf_printf(s, "VSI %3d", element->element_info);
4188 		break;
4189 	default:
4190 		sbuf_cat(s, "?");
4191 		break;
4192 	}
4193 
4194 	sbuf_finish(s);
4195 	return sbuf_data(s);
4196 }
4197 
4198 static int
4199 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4200 {
4201 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4202 	struct i40e_hw *hw = &pf->hw;
4203 	device_t dev = pf->dev;
4204 	struct sbuf *buf;
4205 	struct sbuf *nmbuf;
4206 	enum i40e_status_code status;
4207 	int error = 0;
4208 	u16 next = 0;
4209 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4210 
4211 	struct i40e_aqc_get_switch_config_resp *sw_config;
4212 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4213 
4214 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4215 	if (!buf) {
4216 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4217 		return (ENOMEM);
4218 	}
4219 
4220 	status = i40e_aq_get_switch_config(hw, sw_config,
4221 	    sizeof(aq_buf), &next, NULL);
4222 	if (status) {
4223 		device_printf(dev,
4224 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4225 		    __func__, i40e_stat_str(hw, status),
4226 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4227 		sbuf_delete(buf);
4228 		return (EIO);
4229 	}
4230 	if (next)
4231 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4232 		    __func__, next);
4233 
4234 	nmbuf = sbuf_new_auto();
4235 	if (!nmbuf) {
4236 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4237 		sbuf_delete(buf);
4238 		return (ENOMEM);
4239 	}
4240 
4241 	sbuf_cat(buf, "\n");
4242 	/* Assuming <= 255 elements in switch */
4243 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4244 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4245 	/* Exclude:
4246 	** Revision -- all elements are revision 1 for now
4247 	*/
4248 	sbuf_printf(buf,
4249 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4250 	    "                |          |          | (uplink)\n");
4251 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4252 		// "%4d (%8s) | %8s   %8s   %#8x",
4253 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4254 		sbuf_cat(buf, " ");
4255 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4256 		    &sw_config->element[i]));
4257 		sbuf_cat(buf, " | ");
4258 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4259 		sbuf_cat(buf, "   ");
4260 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4261 		sbuf_cat(buf, "   ");
4262 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4263 		if (i < sw_config->header.num_reported - 1)
4264 			sbuf_cat(buf, "\n");
4265 	}
4266 	sbuf_delete(nmbuf);
4267 
4268 	error = sbuf_finish(buf);
4269 	if (error)
4270 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4271 
4272 	sbuf_delete(buf);
4273 
4274 	return (error);
4275 }
4276 
4277 static int
4278 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4279 {
4280 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4281 	struct i40e_hw *hw = &pf->hw;
4282 	device_t dev = pf->dev;
4283 	struct sbuf *buf;
4284 	int error = 0;
4285 	enum i40e_status_code status;
4286 	u32 reg;
4287 
4288 	struct i40e_aqc_get_set_rss_key_data key_data;
4289 
4290 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4291 	if (!buf) {
4292 		device_printf(dev, "Could not allocate sbuf for output.\n");
4293 		return (ENOMEM);
4294 	}
4295 
4296 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4297 
4298 	sbuf_cat(buf, "\n");
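	/*
	 * On the X722 the RSS key is read through the admin queue; on
	 * other MACs it is read directly from the PFQF_HKEY registers,
	 * four bytes per register.
	 */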
4299 	if (hw->mac.type == I40E_MAC_X722) {
4300 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4301 		if (status)
4302 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4303 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4304 	} else {
4305 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4306 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4307 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4308 		}
4309 	}
4310 
4311 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4312 
4313 	error = sbuf_finish(buf);
4314 	if (error)
4315 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4316 	sbuf_delete(buf);
4317 
4318 	return (error);
4319 }
4320 
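/*
 * Hex-dump helper: prints "length" bytes of "buf" in 16-byte rows, each
 * prefixed with its offset (biased by "label_offset") and, when "text"
 * is set, followed by a printable-ASCII rendering of the same bytes,
 * e.g. "   0 | 48 65 6c 6c 6f ...".
 */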
4321 static void
4322 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4323 {
4324 	int i, j, k, width;
4325 	char c;
4326 
4327 	if (length < 1 || buf == NULL) return;
4328 
4329 	int byte_stride = 16;
4330 	int lines = length / byte_stride;
4331 	int rem = length % byte_stride;
4332 	if (rem > 0)
4333 		lines++;
4334 
4335 	for (i = 0; i < lines; i++) {
4336 		width = (rem > 0 && i == lines - 1)
4337 		    ? rem : byte_stride;
4338 
4339 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4340 
4341 		for (j = 0; j < width; j++)
4342 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4343 
4344 		if (width < byte_stride) {
4345 			for (k = 0; k < (byte_stride - width); k++)
4346 				sbuf_printf(sb, "   ");
4347 		}
4348 
4349 		if (!text) {
4350 			sbuf_printf(sb, "\n");
4351 			continue;
4352 		}
4353 
4354 		for (j = 0; j < width; j++) {
4355 			c = (char)buf[i * byte_stride + j];
4356 			if (c < 32 || c > 126)
4357 				sbuf_printf(sb, ".");
4358 			else
4359 				sbuf_printf(sb, "%c", c);
4360 
4361 			if (j == width - 1)
4362 				sbuf_printf(sb, "\n");
4363 		}
4364 	}
4365 }
4366 
4367 static int
4368 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4369 {
4370 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4371 	struct i40e_hw *hw = &pf->hw;
4372 	device_t dev = pf->dev;
4373 	struct sbuf *buf;
4374 	int error = 0;
4375 	enum i40e_status_code status;
4376 	u8 hlut[512];
4377 	u32 reg;
4378 
4379 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4380 	if (!buf) {
4381 		device_printf(dev, "Could not allocate sbuf for output.\n");
4382 		return (ENOMEM);
4383 	}
4384 
4385 	bzero(hlut, sizeof(hlut));
4386 	sbuf_cat(buf, "\n");
4387 	if (hw->mac.type == I40E_MAC_X722) {
4388 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4389 		if (status)
4390 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4391 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4392 	} else {
4393 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4394 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4395 			bcopy(&reg, &hlut[i << 2], 4);
4396 		}
4397 	}
4398 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4399 
4400 	error = sbuf_finish(buf);
4401 	if (error)
4402 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4403 	sbuf_delete(buf);
4404 
4405 	return (error);
4406 }
4407 
4408 static int
4409 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4410 {
4411 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4412 	struct i40e_hw *hw = &pf->hw;
4413 	u64 hena;
4414 
4415 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4416 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4417 	/* sysctl_handle_long() rejects a NULL arg1, so pass hena by reference */
4418 	return sysctl_handle_64(oidp, &hena, 0, req);
4419 }
4420 
4421 /*
4422  * Sysctl to disable firmware's link management
4423  *
4424  * 1 - Disable link management on this port
4425  * 0 - Re-enable link management
4426  *
4427  * On normal NVMs, firmware manages link by default.
4428  */
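/*
 * Example (the OID name is an assumption; registration happens
 * elsewhere in this file):
 *	sysctl dev.ixl.0.debug.disable_fw_link_management=1
 */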
4429 static int
4430 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4431 {
4432 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4433 	struct i40e_hw *hw = &pf->hw;
4434 	device_t dev = pf->dev;
4435 	int requested_mode = -1;
4436 	enum i40e_status_code status = 0;
4437 	int error = 0;
4438 
4439 	/* Read in new mode */
4440 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4441 	if ((error) || (req->newptr == NULL))
4442 		return (error);
4443 	/* Check for sane value */
4444 	if (requested_mode < 0 || requested_mode > 1) {
4445 		device_printf(dev, "Valid modes are 0 or 1\n");
4446 		return (EINVAL);
4447 	}
4448 
4449 	/* Set new mode; bit 4 of the PHY debug word disables FW link management */
4450 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4451 	if (status) {
4452 		device_printf(dev,
4453 		    "%s: Error setting new phy debug mode %s,"
4454 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4455 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4456 		return (EIO);
4457 	}
4458 
4459 	return (0);
4460 }
4461 
4462 /*
4463  * Read some diagnostic data from an SFP module
4464  * Bytes 96-99, 102-105 from device address 0xA2
4465  */
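/*
 * Per SFF-8472, these offsets should correspond to module temperature
 * and supply voltage (bytes 96-99) and TX/RX power (bytes 102-105);
 * that mapping is an informational assumption here, since the handler
 * just dumps the raw bytes.
 */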
4466 static int
4467 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4468 {
4469 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4470 	device_t dev = pf->dev;
4471 	struct sbuf *sbuf;
4472 	int error = 0;
4473 	u8 output;
4474 
4475 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4476 	if (error) {
4477 		device_printf(dev, "Error reading from i2c\n");
4478 		return (error);
4479 	}
4480 	if (output != 0x3) {
4481 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4482 		return (EIO);
4483 	}
4484 
4485 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4486 	if (!(output & 0x60)) {
4487 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4488 		return (EIO);
4489 	}
4490 
4491 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4492 
4493 	for (u8 offset = 96; offset < 100; offset++) {
4494 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4495 		sbuf_printf(sbuf, "%02X ", output);
4496 	}
4497 	for (u8 offset = 102; offset < 106; offset++) {
4498 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4499 		sbuf_printf(sbuf, "%02X ", output);
4500 	}
4501 
4502 	sbuf_finish(sbuf);
4503 	sbuf_delete(sbuf);
4504 
4505 	return (0);
4506 }
4507 
4508 /*
4509  * Sysctl to read a byte from I2C bus.
4510  *
4511  * Input: 32-bit value:
4512  * 	bits 0-7:   device address (0xA0 or 0xA2)
4513  * 	bits 8-15:  offset (0-255)
4514  *	bits 16-31: unused
4515  * Output: 8-bit value read
4516  */
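/*
 * Example, assuming the debug sysctl tree sits at dev.ixl.0.debug
 * (unit number hypothetical): writing 0x5FA2 reads offset 0x5F from
 * device address 0xA2 and logs the byte read:
 *	sysctl dev.ixl.0.debug.read_i2c_byte=0x5FA2
 */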
4517 static int
4518 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4519 {
4520 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4521 	device_t dev = pf->dev;
4522 	int input = -1, error = 0;
4523 	u8 dev_addr, offset, output;
4524 
4525 	/* Read in I2C read parameters */
4526 	error = sysctl_handle_int(oidp, &input, 0, req);
4527 	if ((error) || (req->newptr == NULL))
4528 		return (error);
4529 	/* Validate device address */
4530 	dev_addr = input & 0xFF;
4531 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4532 		return (EINVAL);
4533 	}
4534 	offset = (input >> 8) & 0xFF;
4535 
4536 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4537 	if (error)
4538 		return (error);
4539 
4540 	device_printf(dev, "%02X\n", output);
4541 	return (0);
4542 }
4543 
4544 /*
4545  * Sysctl to write a byte to the I2C bus.
4546  *
4547  * Input: 32-bit value:
4548  * 	bits 0-7:   device address (0xA0 or 0xA2)
4549  * 	bits 8-15:  offset (0-255)
4550  *	bits 16-23: value to write
4551  *	bits 24-31: unused
4552  * Output: 8-bit value written
4553  */
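/*
 * Example, assuming the same debug tree as above: writing 0xAB7FA2
 * stores the byte 0xAB at offset 0x7F of device address 0xA2:
 *	sysctl dev.ixl.0.debug.write_i2c_byte=0xAB7FA2
 */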
4554 static int
4555 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4556 {
4557 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4558 	device_t dev = pf->dev;
4559 	int input = -1, error = 0;
4560 	u8 dev_addr, offset, value;
4561 
4562 	/* Read in I2C write parameters */
4563 	error = sysctl_handle_int(oidp, &input, 0, req);
4564 	if ((error) || (req->newptr == NULL))
4565 		return (error);
4566 	/* Validate device address */
4567 	dev_addr = input & 0xFF;
4568 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4569 		return (EINVAL);
4570 	}
4571 	offset = (input >> 8) & 0xFF;
4572 	value = (input >> 16) & 0xFF;
4573 
4574 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4575 	if (error)
4576 		return (error);
4577 
4578 	device_printf(dev, "%02X written\n", value);
4579 	return (0);
4580 }
4581 
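/*
 * The FEC sysctls below share two helpers: ixl_get_fec_config() reports
 * whether a given bit is set in the PHY's current FEC configuration,
 * and ixl_set_fec_config() rewrites the PHY config with that single bit
 * changed, only touching the link when the value actually differs.
 */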
4582 static int
4583 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4584     u8 bit_pos, int *is_set)
4585 {
4586 	device_t dev = pf->dev;
4587 	struct i40e_hw *hw = &pf->hw;
4588 	enum i40e_status_code status;
4589 
4590 	status = i40e_aq_get_phy_capabilities(hw,
4591 	    FALSE, FALSE, abilities, NULL);
4592 	if (status) {
4593 		device_printf(dev,
4594 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4595 		    __func__, i40e_stat_str(hw, status),
4596 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4597 		return (EIO);
4598 	}
4599 
4600 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4601 	return (0);
4602 }
4603 
4604 static int
4605 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4606     u8 bit_pos, int set)
4607 {
4608 	device_t dev = pf->dev;
4609 	struct i40e_hw *hw = &pf->hw;
4610 	struct i40e_aq_set_phy_config config;
4611 	enum i40e_status_code status;
4612 
4613 	/* Set new PHY config */
4614 	memset(&config, 0, sizeof(config));
4615 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4616 	if (set)
4617 		config.fec_config |= bit_pos;
4618 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4619 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4620 		config.phy_type = abilities->phy_type;
4621 		config.phy_type_ext = abilities->phy_type_ext;
4622 		config.link_speed = abilities->link_speed;
4623 		config.eee_capability = abilities->eee_capability;
4624 		config.eeer = abilities->eeer_val;
4625 		config.low_power_ctrl = abilities->d3_lpan;
4626 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4627 
4628 		if (status) {
4629 			device_printf(dev,
4630 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4631 			    __func__, i40e_stat_str(hw, status),
4632 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4633 			return (EIO);
4634 		}
4635 	}
4636 
4637 	return (0);
4638 }
4639 
4640 static int
4641 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4642 {
4643 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4644 	int mode, error = 0;
4645 
4646 	struct i40e_aq_get_phy_abilities_resp abilities;
4647 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4648 	if (error)
4649 		return (error);
4650 	/* Read in new mode */
4651 	error = sysctl_handle_int(oidp, &mode, 0, req);
4652 	if ((error) || (req->newptr == NULL))
4653 		return (error);
4654 
4655 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4656 }
4657 
4658 static int
4659 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4660 {
4661 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4662 	int mode, error = 0;
4663 
4664 	struct i40e_aq_get_phy_abilities_resp abilities;
4665 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4666 	if (error)
4667 		return (error);
4668 	/* Read in new mode */
4669 	error = sysctl_handle_int(oidp, &mode, 0, req);
4670 	if ((error) || (req->newptr == NULL))
4671 		return (error);
4672 
4673 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4674 }
4675 
4676 static int
4677 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4678 {
4679 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4680 	int mode, error = 0;
4681 
4682 	struct i40e_aq_get_phy_abilities_resp abilities;
4683 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4684 	if (error)
4685 		return (error);
4686 	/* Read in new mode */
4687 	error = sysctl_handle_int(oidp, &mode, 0, req);
4688 	if ((error) || (req->newptr == NULL))
4689 		return (error);
4690 
4691 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4692 }
4693 
4694 static int
4695 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4696 {
4697 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4698 	int mode, error = 0;
4699 
4700 	struct i40e_aq_get_phy_abilities_resp abilities;
4701 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4702 	if (error)
4703 		return (error);
4704 	/* Read in new mode */
4705 	error = sysctl_handle_int(oidp, &mode, 0, req);
4706 	if ((error) || (req->newptr == NULL))
4707 		return (error);
4708 
4709 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4710 }
4711 
4712 static int
4713 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4714 {
4715 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4716 	int mode, error = 0;
4717 
4718 	struct i40e_aq_get_phy_abilities_resp abilities;
4719 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4720 	if (error)
4721 		return (error);
4722 	/* Read in new mode */
4723 	error = sysctl_handle_int(oidp, &mode, 0, req);
4724 	if ((error) || (req->newptr == NULL))
4725 		return (error);
4726 
4727 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4728 }
4729 
4730 static int
4731 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4732 {
4733 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4734 	struct i40e_hw *hw = &pf->hw;
4735 	device_t dev = pf->dev;
4736 	struct sbuf *buf;
4737 	int error = 0;
4738 	enum i40e_status_code status;
4739 
4740 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4741 	if (!buf) {
4742 		device_printf(dev, "Could not allocate sbuf for output.\n");
4743 		return (ENOMEM);
4744 	}
4745 
4746 	u8 *final_buff;
4747 	/* This amount is only necessary if reading the entire cluster into memory */
4748 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4749 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4750 	if (final_buff == NULL) {
4751 		device_printf(dev, "Could not allocate memory for output.\n");
4752 		goto out;
4753 	}
4754 	int final_buff_len = 0;
4755 
4756 	u8 cluster_id = 1;
4757 	bool more = true;
4758 
4759 	u8 dump_buf[4096];
4760 	u16 curr_buff_size = 4096;
4761 	u8 curr_next_table = 0;
4762 	u32 curr_next_index = 0;
4763 
4764 	u16 ret_buff_size;
4765 	u8 ret_next_table;
4766 	u32 ret_next_index;
4767 
4768 	sbuf_cat(buf, "\n");
4769 
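	/*
	 * Walk cluster 1 table by table: each i40e_aq_debug_dump() call
	 * returns up to one 4KB chunk plus a next-table/next-index
	 * cursor; a returned table id of 0xFF marks the end of the
	 * cluster.
	 */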
4770 	while (more) {
4771 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4772 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4773 		if (status) {
4774 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4775 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4776 			goto free_out;
4777 		}
4778 
4779 		/* copy info out of temp buffer */
4780 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4781 		final_buff_len += ret_buff_size;
4782 
4783 		if (ret_next_table != curr_next_table) {
4784 			/* We're done with the current table; we can dump out read data. */
4785 			sbuf_printf(buf, "%d:", curr_next_table);
4786 			int bytes_printed = 0;
4787 			while (bytes_printed < final_buff_len) {
4788 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4789 				bytes_printed += 16;
4790 			}
4791 			sbuf_cat(buf, "\n");
4792 
4793 			/* The entire cluster has been read; we're finished */
4794 			if (ret_next_table == 0xFF)
4795 				break;
4796 
4797 			/* Otherwise clear the output buffer and continue reading */
4798 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4799 			final_buff_len = 0;
4800 		}
4801 
4802 		if (ret_next_index == 0xFFFFFFFF)
4803 			ret_next_index = 0;
4804 
4805 		bzero(dump_buf, sizeof(dump_buf));
4806 		curr_next_table = ret_next_table;
4807 		curr_next_index = ret_next_index;
4808 	}
4809 
4810 free_out:
4811 	free(final_buff, M_DEVBUF);
4812 out:
4813 	error = sbuf_finish(buf);
4814 	if (error)
4815 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4816 	sbuf_delete(buf);
4817 
4818 	return (error);
4819 }
4820 
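/*
 * Sysctl for toggling the firmware's LLDP agent: 0 stops the agent on
 * this port, 1 (re)starts it. The IXL_PF_STATE_FW_LLDP_DISABLED flag
 * in pf->state tracks the result.
 */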
4821 static int
4822 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4823 {
4824 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4825 	struct i40e_hw *hw = &pf->hw;
4826 	device_t dev = pf->dev;
4827 	int error = 0;
4828 	int state, new_state;
4829 	enum i40e_status_code status;
4830 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4831 
4832 	/* Read in new mode */
4833 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4834 	if ((error) || (req->newptr == NULL))
4835 		return (error);
4836 
4837 	/* Already in requested state */
4838 	if (new_state == state)
4839 		return (error);
4840 
4841 	if (new_state == 0) {
4842 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4843 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4844 			return (EINVAL);
4845 		}
4846 
4847 		if (pf->hw.aq.api_maj_ver < 1 ||
4848 		    (pf->hw.aq.api_maj_ver == 1 &&
4849 		    pf->hw.aq.api_min_ver < 7)) {
4850 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4851 			return (EINVAL);
4852 		}
4853 
4854 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4855 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4856 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4857 	} else {
4858 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4859 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4860 			device_printf(dev, "FW LLDP agent is already running\n");
4861 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4862 	}
4863 
4864 	return (0);
4865 }
4866 
4867 /*
4868  * Get FW LLDP Agent status
4869  */
4870 int
4871 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4872 {
4873 	enum i40e_status_code ret = I40E_SUCCESS;
4874 	struct i40e_lldp_variables lldp_cfg;
4875 	struct i40e_hw *hw = &pf->hw;
4876 	u8 adminstatus = 0;
4877 
4878 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4879 	if (ret)
4880 		return ret;
4881 
4882 	/* Get the LLDP AdminStatus for the current port */
4883 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4884 	adminstatus &= 0xf;
4885 
4886 	/* Check if LLDP agent is disabled */
4887 	if (!adminstatus) {
4888 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4889 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4890 	} else
4891 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4892 
4893 	return (0);
4894 }
4895 
4896 int
4897 ixl_attach_get_link_status(struct ixl_pf *pf)
4898 {
4899 	struct i40e_hw *hw = &pf->hw;
4900 	device_t dev = pf->dev;
4901 	int error = 0;
4902 
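	/*
	 * Firmware older than 4.33 needs an explicit autonegotiation
	 * restart (after a short settle delay) before the link state
	 * can be queried.
	 */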
4903 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4904 	    (hw->aq.fw_maj_ver < 4)) {
4905 		i40e_msec_delay(75);
4906 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4907 		if (error) {
4908 			device_printf(dev, "link restart failed, aq_err=%d\n",
4909 			    pf->hw.aq.asq_last_status);
4910 			return error;
4911 		}
4912 	}
4913 
4914 	/* Determine link state */
4915 	hw->phy.get_link_info = TRUE;
4916 	i40e_get_link_status(hw, &pf->link_up);
4917 	return (0);
4918 }
4919 
4920 static int
4921 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4922 {
4923 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4924 	int requested = 0, error = 0;
4925 
4926 	/* Read in new mode */
4927 	error = sysctl_handle_int(oidp, &requested, 0, req);
4928 	if ((error) || (req->newptr == NULL))
4929 		return (error);
4930 
4931 	/* Initiate the PF reset later in the admin task */
4932 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4933 
4934 	return (error);
4935 }
4936 
4937 static int
4938 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4939 {
4940 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4941 	struct i40e_hw *hw = &pf->hw;
4942 	int requested = 0, error = 0;
4943 
4944 	/* Read in new mode */
4945 	error = sysctl_handle_int(oidp, &requested, 0, req);
4946 	if ((error) || (req->newptr == NULL))
4947 		return (error);
4948 
4949 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4950 
4951 	return (error);
4952 }
4953 
4954 static int
4955 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4956 {
4957 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4958 	struct i40e_hw *hw = &pf->hw;
4959 	int requested = 0, error = 0;
4960 
4961 	/* Read in new mode */
4962 	error = sysctl_handle_int(oidp, &requested, 0, req);
4963 	if ((error) || (req->newptr == NULL))
4964 		return (error);
4965 
4966 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4967 
4968 	return (error);
4969 }
4970 
4971 static int
4972 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
4973 {
4974 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4975 	struct i40e_hw *hw = &pf->hw;
4976 	int requested = 0, error = 0;
4977 
4978 	/* Read in new mode */
4979 	error = sysctl_handle_int(oidp, &requested, 0, req);
4980 	if ((error) || (req->newptr == NULL))
4981 		return (error);
4982 
4983 	/* TODO: Find out how to bypass this */
4984 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
4985 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
4986 		error = EINVAL;
4987 	} else
4988 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
4989 
4990 	return (error);
4991 }
4992 
4993 /*
4994  * Print out the mapping of TX and RX queue indexes
4995  * to MSI-X vectors.
4996  */
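/*
 * Output looks like the following (values illustrative):
 *	(rxq   0): 1
 *	(txq   0): 1
 */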
4997 static int
4998 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4999 {
5000 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5001 	struct ixl_vsi *vsi = &pf->vsi;
5002 	device_t dev = pf->dev;
5003 	struct sbuf *buf;
5004 	int error = 0;
5005 
5006 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
5007 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
5008 
5009 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5010 	if (!buf) {
5011 		device_printf(dev, "Could not allocate sbuf for output.\n");
5012 		return (ENOMEM);
5013 	}
5014 
5015 	sbuf_cat(buf, "\n");
5016 	for (int i = 0; i < vsi->num_rx_queues; i++) {
5017 		rx_que = &vsi->rx_queues[i];
5018 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5019 	}
5020 	for (int i = 0; i < vsi->num_tx_queues; i++) {
5021 		tx_que = &vsi->tx_queues[i];
5022 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5023 	}
5024 
5025 	error = sbuf_finish(buf);
5026 	if (error)
5027 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5028 	sbuf_delete(buf);
5029 
5030 	return (error);
5031 }
5032