xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision b8f51b8c5423af0795429836a00f2a968e791f6e)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54 
55 /* Sysctls */
56 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63 
64 static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65 static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
66 
67 /* Debug Sysctls */
68 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88 
89 /* Debug Sysctls (resets and queue state) */
90 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94 #ifdef IXL_DEBUG
95 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
97 #endif
98 
99 #ifdef IXL_IW
100 extern int ixl_enable_iwarp;
101 extern int ixl_limit_iwarp_msix;
102 #endif
103 
104 static const char * const ixl_fc_string[6] = {
105 	"None",
106 	"Rx",
107 	"Tx",
108 	"Full",
109 	"Priority",
110 	"Default"
111 };
112 
113 static char *ixl_fec_string[3] = {
114        "CL108 RS-FEC",
115        "CL74 FC-FEC/BASE-R",
116        "None"
117 };
118 
119 /* Functions for setting and checking driver state. Note the functions take
120  * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
121  * operations require bitmasks. This can easily lead to programming errors, so
122  * we provide wrapper functions to avoid them.
123  */
124 
125 /**
126  * ixl_set_state - Set the specified state
127  * @s: the state bitmap
128  * @bit: the state to set
129  *
130  * Atomically update the state bitmap with the specified bit set.
131  */
132 inline void
133 ixl_set_state(volatile u32 *s, enum ixl_state bit)
134 {
135 	/* atomic_set_32 expects a bitmask */
136 	atomic_set_32(s, BIT(bit));
137 }
138 
139 /**
140  * ixl_clear_state - Clear the specified state
141  * @s: the state bitmap
142  * @bit: the state to clear
143  *
144  * Atomically update the state bitmap with the specified bit cleared.
145  */
146 inline void
147 ixl_clear_state(volatile u32 *s, enum ixl_state bit)
148 {
149 	/* atomic_clear_32 expects a bitmask */
150 	atomic_clear_32(s, BIT(bit));
151 }
152 
153 /**
154  * ixl_test_state - Test the specified state
155  * @s: the state bitmap
156  * @bit: the bit to test
157  *
158  * Return true if the state is set, false otherwise. Use this only if the flow
159  * does not need to update the state. If you must update the state as well,
160  * prefer ixl_testandset_state.
161  */
162 inline bool
163 ixl_test_state(volatile u32 *s, enum ixl_state bit)
164 {
165 	return !!(*s & BIT(bit));
166 }
167 
168 /**
169  * ixl_testandset_state - Test and set the specified state
170  * @s: the state bitmap
171  * @bit: the bit to test
172  *
173  * Atomically update the state bitmap, setting the specified bit. Returns the
174  * previous value of the bit.
175  */
176 inline u32
177 ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
178 {
179 	/* atomic_testandset_32 expects a bit position, as opposed to the
180 	 * bitmask expected by other atomic functions */
181 	return atomic_testandset_32(s, bit);
182 }
183 
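/*
 * Editor's sketch (not part of the driver, compiled out): typical use of
 * the state wrappers above. The hypothetical helper below assumes only
 * pf->state and the IXL_STATE_RECOVERY_MODE value already used elsewhere
 * in this file.
 */
#if 0
static void
example_state_usage(struct ixl_pf *pf)
{
	/* The wrappers take a bit position; BIT() conversion happens inside */
	ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);

	/* Read-only check; no update intended */
	if (ixl_test_state(&pf->state, IXL_STATE_RECOVERY_MODE)) {
		/* ... */
	}

	/* Atomically claim a state; non-zero means it was already set */
	if (ixl_testandset_state(&pf->state, IXL_STATE_RECOVERY_MODE) == 0) {
		/* we are the ones who set it */
	}

	ixl_clear_state(&pf->state, IXL_STATE_RECOVERY_MODE);
}
#endif
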
184 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
185 
186 /*
187 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
188 */
189 void
190 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
191 {
192 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
193 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
194 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
195 
196 	sbuf_printf(buf,
197 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
198 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
199 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
200 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
201 	    IXL_NVM_VERSION_HI_SHIFT,
202 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
203 	    IXL_NVM_VERSION_LO_SHIFT,
204 	    hw->nvm.eetrack,
205 	    oem_ver, oem_build, oem_patch);
206 }
207 
208 void
209 ixl_print_nvm_version(struct ixl_pf *pf)
210 {
211 	struct i40e_hw *hw = &pf->hw;
212 	device_t dev = pf->dev;
213 	struct sbuf *sbuf;
214 
215 	sbuf = sbuf_new_auto();
216 	ixl_nvm_version_str(hw, sbuf);
217 	sbuf_finish(sbuf);
218 	device_printf(dev, "%s\n", sbuf_data(sbuf));
219 	sbuf_delete(sbuf);
220 }
221 
222 /**
223  * ixl_get_fw_mode - Check the state of FW
224  * @pf: PF structure
225  *
226  * Identify the state of the FW. It might be in recovery mode,
227  * which limits functionality and requires special handling
228  * from the driver.
229  *
230  * @returns FW mode (normal, recovery, unexpected EMP reset)
231  */
232 static enum ixl_fw_mode
233 ixl_get_fw_mode(struct ixl_pf *pf)
234 {
235 	struct i40e_hw *hw = &pf->hw;
236 	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
237 	u32 fwsts;
238 
239 #ifdef IXL_DEBUG
240 	if (pf->recovery_mode)
241 		return IXL_FW_MODE_RECOVERY;
242 #endif
243 	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
244 
245 	/* Is set and has one of the expected values */
246 	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
247 	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
248 	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
249 	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
250 		fw_mode = IXL_FW_MODE_RECOVERY;
251 	else {
252 		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
253 		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
254 			fw_mode = IXL_FW_MODE_UEMPR;
255 	}
256 	return (fw_mode);
257 }
258 
259 /**
260  * ixl_pf_reset - Reset the PF
261  * @pf: PF structure
262  *
263  * Ensure that FW is in the right state and do the reset
264  * if needed.
265  *
266  * @returns zero on success, or an error code on failure.
267  */
268 int
269 ixl_pf_reset(struct ixl_pf *pf)
270 {
271 	struct i40e_hw *hw = &pf->hw;
272 	enum i40e_status_code status;
273 	enum ixl_fw_mode fw_mode;
274 
275 	fw_mode = ixl_get_fw_mode(pf);
276 	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
277 	if (fw_mode == IXL_FW_MODE_RECOVERY) {
278 		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
279 		/* Don't try to reset device if it's in recovery mode */
280 		return (0);
281 	}
282 
283 	status = i40e_pf_reset(hw);
284 	if (status == I40E_SUCCESS)
285 		return (0);
286 
287 	/* Check FW mode again in case it has changed while
288 	 * waiting for reset to complete */
289 	fw_mode = ixl_get_fw_mode(pf);
290 	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
291 	if (fw_mode == IXL_FW_MODE_RECOVERY) {
292 		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
293 		return (0);
294 	}
295 
296 	if (fw_mode == IXL_FW_MODE_UEMPR)
297 		device_printf(pf->dev,
298 		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
299 	else
300 		device_printf(pf->dev, "PF reset failure %s\n",
301 		    i40e_stat_str(hw, status));
302 	return (EIO);
303 }
304 
305 /**
306  * ixl_setup_hmc - Set up the LAN Host Memory Cache
307  * @pf: PF structure
308  *
309  * Initialize and configure the LAN Host Memory Cache.
310  *
311  * @returns 0 on success, EIO on error
312  */
313 int
314 ixl_setup_hmc(struct ixl_pf *pf)
315 {
316 	struct i40e_hw *hw = &pf->hw;
317 	enum i40e_status_code status;
318 
319 	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
320 	    hw->func_caps.num_rx_qp, 0, 0);
321 	if (status) {
322 		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
323 		    i40e_stat_str(hw, status));
324 		return (EIO);
325 	}
326 
327 	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
328 	if (status) {
329 		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
330 		    i40e_stat_str(hw, status));
331 		return (EIO);
332 	}
333 
334 	return (0);
335 }
336 
337 /**
338  * ixl_shutdown_hmc - Shut down the LAN Host Memory Cache
339  * @pf: PF structure
340  *
341  * Shut down the Host Memory Cache if it has been configured.
342  *
343  */
344 void
345 ixl_shutdown_hmc(struct ixl_pf *pf)
346 {
347 	struct i40e_hw *hw = &pf->hw;
348 	enum i40e_status_code status;
349 
350 	/* HMC not configured, no need to shut it down */
351 	if (hw->hmc.hmc_obj == NULL)
352 		return;
353 
354 	status = i40e_shutdown_lan_hmc(hw);
355 	if (status)
356 		device_printf(pf->dev,
357 		    "Shutdown LAN HMC failed with code %s\n",
358 		    i40e_stat_str(hw, status));
359 }
360 /*
361  * Write PF ITR values to queue ITR registers.
362  */
363 void
364 ixl_configure_itr(struct ixl_pf *pf)
365 {
366 	ixl_configure_tx_itr(pf);
367 	ixl_configure_rx_itr(pf);
368 }
369 
370 /*********************************************************************
371  *
372  *  Get the hardware capabilities
373  *
374  **********************************************************************/
375 
376 int
377 ixl_get_hw_capabilities(struct ixl_pf *pf)
378 {
379 	struct i40e_aqc_list_capabilities_element_resp *buf;
380 	struct i40e_hw	*hw = &pf->hw;
381 	device_t 	dev = pf->dev;
382 	enum i40e_status_code status;
383 	int len, i2c_intfc_num;
384 	bool again = TRUE;
385 	u16 needed;
386 
387 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
388 		hw->func_caps.iwarp = 0;
389 		return (0);
390 	}
391 
392 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
393 retry:
394 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
395 	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
396 		device_printf(dev, "Unable to allocate cap memory\n");
397 		return (ENOMEM);
398 	}
399 
400 	/* This populates the hw struct */
401 	status = i40e_aq_discover_capabilities(hw, buf, len,
402 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
403 	free(buf, M_IXL);
404 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
405 	    (again == TRUE)) {
406 		/* retry once with a larger buffer */
407 		again = FALSE;
408 		len = needed;
409 		goto retry;
410 	} else if (status != I40E_SUCCESS) {
411 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
412 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
413 		return (ENODEV);
414 	}
415 
416 	/*
417 	 * Some devices have both MDIO and I2C; since this isn't reported
418 	 * by the FW, check registers to see if an I2C interface exists.
419 	 */
420 	i2c_intfc_num = ixl_find_i2c_interface(pf);
421 	if (i2c_intfc_num != -1)
422 		pf->has_i2c = true;
423 
424 	/* Determine functions to use for driver I2C accesses */
425 	switch (pf->i2c_access_method) {
426 	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
427 		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
428 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
429 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
430 		} else {
431 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
432 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
433 		}
434 		break;
435 	}
436 	case IXL_I2C_ACCESS_METHOD_AQ:
437 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
438 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
439 		break;
440 	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
441 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
442 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
443 		break;
444 	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
445 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
446 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
447 		break;
448 	default:
449 		/* Should not happen */
450 		device_printf(dev, "Error setting I2C access functions\n");
451 		break;
452 	}
453 
454 	/* Keep link active by default */
455 	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
456 
457 	/* Print a subset of the capability information. */
458 	device_printf(dev,
459 	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
460 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
461 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
462 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
463 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
464 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
465 	    "MDIO shared");
466 
467 	return (0);
468 }
469 
470 /* For the set_advertise sysctl */
471 void
472 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
473 {
474 	device_t dev = pf->dev;
475 	int err;
476 
477 	/* Make sure to initialize the device to the complete list of
478 	 * supported speeds on driver load, to ensure unloading and
479 	 * reloading the driver will restore this value.
480 	 */
481 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
482 	if (err) {
483 		/* Non-fatal error */
484 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
485 			      __func__, err);
486 		return;
487 	}
488 
489 	pf->advertised_speed =
490 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
491 }
492 
493 int
494 ixl_teardown_hw_structs(struct ixl_pf *pf)
495 {
496 	enum i40e_status_code status = 0;
497 	struct i40e_hw *hw = &pf->hw;
498 	device_t dev = pf->dev;
499 
500 	/* Shutdown LAN HMC */
501 	if (hw->hmc.hmc_obj) {
502 		status = i40e_shutdown_lan_hmc(hw);
503 		if (status) {
504 			device_printf(dev,
505 			    "init: LAN HMC shutdown failure; status %s\n",
506 			    i40e_stat_str(hw, status));
507 			goto err_out;
508 		}
509 	}
510 
511 	/* Shutdown admin queue */
512 	ixl_disable_intr0(hw);
513 	status = i40e_shutdown_adminq(hw);
514 	if (status)
515 		device_printf(dev,
516 		    "init: Admin Queue shutdown failure; status %s\n",
517 		    i40e_stat_str(hw, status));
518 
519 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
520 err_out:
521 	return (status);
522 }
523 
524 /*
525 ** Creates new filter with given MAC address and VLAN ID
526 */
527 static struct ixl_mac_filter *
528 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
529 {
530 	struct ixl_mac_filter  *f;
531 
532 	/* create a new empty filter */
533 	f = malloc(sizeof(struct ixl_mac_filter),
534 	    M_IXL, M_NOWAIT | M_ZERO);
535 	if (f) {
536 		LIST_INSERT_HEAD(headp, f, ftle);
537 		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
538 		f->vlan = vlan;
539 	}
540 
541 	return (f);
542 }
543 
544 /**
545  * ixl_free_filters - Free all filters in the given list
546  * @headp: pointer to the list head
547  *
548  * Frees memory used by each entry in the list.
549  * Does not remove filters from HW.
550  */
551 void
552 ixl_free_filters(struct ixl_ftl_head *headp)
553 {
554 	struct ixl_mac_filter *f, *nf;
555 
556 	f = LIST_FIRST(headp);
557 	while (f != NULL) {
558 		nf = LIST_NEXT(f, ftle);
559 		free(f, M_IXL);
560 		f = nf;
561 	}
562 
563 	LIST_INIT(headp);
564 }
565 
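/*
 * Editor's sketch (not part of the driver, compiled out): the temporary
 * list pattern used throughout this file. Filters are collected on a
 * local list head with ixl_new_filter() and either handed to
 * ixl_add_hw_filters() or released with ixl_free_filters() on failure.
 */
#if 0
static void
example_filter_list(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;

	LIST_INIT(&to_add);
	if (ixl_new_filter(&to_add, macaddr, IXL_VLAN_ANY) == NULL)
		return;	/* allocation failed; nothing was queued */
	/* ... on any later error, drop everything queued so far ... */
	ixl_free_filters(&to_add);
}
#endif
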
566 static u_int
567 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
568 {
569 	struct ixl_add_maddr_arg *ama = arg;
570 	struct ixl_vsi *vsi = ama->vsi;
571 	const u8 *macaddr = (u8*)LLADDR(sdl);
572 	struct ixl_mac_filter *f;
573 
574 	/* Does one already exist */
575 	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
576 	if (f != NULL)
577 		return (0);
578 
579 	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
580 	if (f == NULL) {
581 		device_printf(vsi->dev, "WARNING: no filter available!!\n");
582 		return (0);
583 	}
584 	f->flags |= IXL_FILTER_MC;
585 
586 	return (1);
587 }
588 
589 /*********************************************************************
590  * 	Filter Routines
591  *
592  *	Routines for multicast and vlan filter management.
593  *
594  *********************************************************************/
595 void
596 ixl_add_multi(struct ixl_vsi *vsi)
597 {
598 	if_t			ifp = vsi->ifp;
599 	struct i40e_hw		*hw = vsi->hw;
600 	int			mcnt = 0;
601 	struct ixl_add_maddr_arg cb_arg;
602 
603 	IOCTL_DEBUGOUT("ixl_add_multi: begin");
604 
605 	mcnt = if_llmaddr_count(ifp);
606 	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
607 		i40e_aq_set_vsi_multicast_promiscuous(hw,
608 		    vsi->seid, TRUE, NULL);
609 		/* delete all existing MC filters */
610 		ixl_del_multi(vsi, true);
611 		return;
612 	}
613 
614 	cb_arg.vsi = vsi;
615 	LIST_INIT(&cb_arg.to_add);
616 
617 	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
618 	if (mcnt > 0)
619 		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
620 
621 	IOCTL_DEBUGOUT("ixl_add_multi: end");
622 }
623 
624 static u_int
625 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
626 {
627 	struct ixl_mac_filter *f = arg;
628 
629 	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
630 		return (1);
631 	else
632 		return (0);
633 }
634 
635 void
636 ixl_del_multi(struct ixl_vsi *vsi, bool all)
637 {
638 	struct ixl_ftl_head	to_del;
639 	if_t			ifp = vsi->ifp;
640 	struct ixl_mac_filter	*f, *fn;
641 	int			mcnt = 0;
642 
643 	IOCTL_DEBUGOUT("ixl_del_multi: begin");
644 
645 	LIST_INIT(&to_del);
646 	/* Search for removed multicast addresses */
647 	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
648 		if ((f->flags & IXL_FILTER_MC) == 0 ||
649 		    (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
650 			continue;
651 
652 		LIST_REMOVE(f, ftle);
653 		LIST_INSERT_HEAD(&to_del, f, ftle);
654 		mcnt++;
655 	}
656 
657 	if (mcnt > 0)
658 		ixl_del_hw_filters(vsi, &to_del, mcnt);
659 }
660 
661 void
662 ixl_link_up_msg(struct ixl_pf *pf)
663 {
664 	struct i40e_hw *hw = &pf->hw;
665 	if_t ifp = pf->vsi.ifp;
666 	char *req_fec_string, *neg_fec_string;
667 	u8 fec_abilities;
668 
669 	fec_abilities = hw->phy.link_info.req_fec_info;
670 	/* If both RS and KR are requested, only show RS */
671 	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
672 		req_fec_string = ixl_fec_string[0];
673 	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
674 		req_fec_string = ixl_fec_string[1];
675 	else
676 		req_fec_string = ixl_fec_string[2];
677 
678 	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
679 		neg_fec_string = ixl_fec_string[0];
680 	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
681 		neg_fec_string = ixl_fec_string[1];
682 	else
683 		neg_fec_string = ixl_fec_string[2];
684 
685 	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
686 	    if_name(ifp),
687 	    ixl_link_speed_string(hw->phy.link_info.link_speed),
688 	    req_fec_string, neg_fec_string,
689 	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
690 	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
691 	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
692 		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
693 		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
694 		ixl_fc_string[1] : ixl_fc_string[0]);
695 }
696 
697 /*
698  * Configure admin queue/misc interrupt cause registers in hardware.
699  */
700 void
701 ixl_configure_intr0_msix(struct ixl_pf *pf)
702 {
703 	struct i40e_hw *hw = &pf->hw;
704 	u32 reg;
705 
706 	/* First set up the adminq - vector 0 */
707 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
708 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
709 
710 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
711 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
712 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
713 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
714 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
715 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
716 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
717 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
718 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
719 
720 	/*
721 	 * 0x7FF is the end of the queue list.
722 	 * This means we won't use MSI-X vector 0 for a queue interrupt
723 	 * in MSI-X mode.
724 	 */
725 	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
726 	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
727 	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
728 
729 	wr32(hw, I40E_PFINT_DYN_CTL0,
730 	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
731 	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
732 
733 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
734 }
735 
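/*
 * Editor's note (not part of the driver, compiled out): the ITR registers
 * count in 2-usec units, so a desired interval in microseconds is halved
 * before being written; 124 usecs -> 124 / 2 = 62 = 0x3E, matching the
 * value written above. A minimal sketch of the conversion:
 */
#if 0
static inline u32
example_usecs_to_itr(u32 usecs)
{
	return (usecs / 2);	/* e.g. 124 usecs -> 0x3E register units */
}
#endif
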
736 void
737 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
738 {
739 	/* Add supported media types */
740 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
741 		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
742 
743 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
744 		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
745 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
746 		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
747 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
748 		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
749 
750 	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
751 		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
752 
753 	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
754 		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
755 
756 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
757 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
758 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
759 		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
760 
761 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
762 		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
763 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
764 		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
765 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
766 		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
767 
768 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
769 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
770 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
771 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
772 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
773 		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
774 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
775 		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
776 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
777 		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
778 
779 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
780 		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
781 
782 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
783 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
784 		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
785 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
786 		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
787 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
788 		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
789 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
790 		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
791 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
792 		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
793 
794 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
795 		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
796 
797 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
798 		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
799 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
800 		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
801 
802 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
803 		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
804 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
805 		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
806 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
807 		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
808 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
809 		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
810 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
811 		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
812 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
813 		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
814 }
815 
816 /*********************************************************************
817  *
818  *  Get Firmware Switch configuration
819  *	- this will need to be more robust when more complex
820  *	  switch configurations are enabled.
821  *
822  **********************************************************************/
823 int
824 ixl_switch_config(struct ixl_pf *pf)
825 {
826 	struct i40e_hw	*hw = &pf->hw;
827 	struct ixl_vsi	*vsi = &pf->vsi;
828 	device_t 	dev = iflib_get_dev(vsi->ctx);
829 	struct i40e_aqc_get_switch_config_resp *sw_config;
830 	u8	aq_buf[I40E_AQ_LARGE_BUF];
831 	int	ret;
832 	u16	next = 0;
833 
834 	memset(&aq_buf, 0, sizeof(aq_buf));
835 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
836 	ret = i40e_aq_get_switch_config(hw, sw_config,
837 	    sizeof(aq_buf), &next, NULL);
838 	if (ret) {
839 		device_printf(dev, "aq_get_switch_config() failed, error %d,"
840 		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
841 		return (ret);
842 	}
843 	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
844 		device_printf(dev,
845 		    "Switch config: header reported: %d in structure, %d total\n",
846 		    LE16_TO_CPU(sw_config->header.num_reported),
847 		    LE16_TO_CPU(sw_config->header.num_total));
848 		for (int i = 0;
849 		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
850 			device_printf(dev,
851 			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
852 			    sw_config->element[i].element_type,
853 			    LE16_TO_CPU(sw_config->element[i].seid),
854 			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
855 			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
856 		}
857 	}
858 	/* Simplified due to a single VSI */
859 	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
860 	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
861 	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
862 	return (ret);
863 }
864 
865 void
866 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
867 {
868 	struct sysctl_oid *tree;
869 	struct sysctl_oid_list *child;
870 	struct sysctl_oid_list *vsi_list;
871 
872 	tree = device_get_sysctl_tree(vsi->dev);
873 	child = SYSCTL_CHILDREN(tree);
874 	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
875 			CTLFLAG_RD, NULL, "VSI Number");
876 
877 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
878 	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
879 
880 	/* Copy of netstat RX errors counter for validation purposes */
881 	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
882 			CTLFLAG_RD, &vsi->ierrors,
883 			"RX packet errors");
884 
885 	if (queues_sysctls)
886 		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
887 }
888 
889 /*
890  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
891  * Writes to the ITR registers immediately.
892  */
893 static int
894 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
895 {
896 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
897 	device_t dev = pf->dev;
898 	int error = 0;
899 	int requested_tx_itr;
900 
901 	requested_tx_itr = pf->tx_itr;
902 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
903 	if ((error) || (req->newptr == NULL))
904 		return (error);
905 	if (pf->dynamic_tx_itr) {
906 		device_printf(dev,
907 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
908 		return (EINVAL);
909 	}
910 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
911 		device_printf(dev,
912 		    "Invalid TX itr value; value must be between 0 and %d\n",
913 		    IXL_MAX_ITR);
914 		return (EINVAL);
915 	}
916 
917 	pf->tx_itr = requested_tx_itr;
918 	ixl_configure_tx_itr(pf);
919 
920 	return (error);
921 }
922 
923 /*
924  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
925  * Writes to the ITR registers immediately.
926  */
927 static int
928 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
929 {
930 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
931 	device_t dev = pf->dev;
932 	int error = 0;
933 	int requested_rx_itr;
934 
935 	requested_rx_itr = pf->rx_itr;
936 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
937 	if ((error) || (req->newptr == NULL))
938 		return (error);
939 	if (pf->dynamic_rx_itr) {
940 		device_printf(dev,
941 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
942 		return (EINVAL);
943 	}
944 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
945 		device_printf(dev,
946 		    "Invalid RX itr value; value must be between 0 and %d\n",
947 		    IXL_MAX_ITR);
948 		return (EINVAL);
949 	}
950 
951 	pf->rx_itr = requested_rx_itr;
952 	ixl_configure_rx_itr(pf);
953 
954 	return (error);
955 }
956 
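/*
 * Editor's usage sketch for the two ITR handlers above, assuming they are
 * attached under the device's sysctl tree as "tx_itr" and "rx_itr" (the
 * exact OIDs are created elsewhere in the driver and are an assumption
 * here):
 *
 *   # sysctl dev.ixl.0.tx_itr=122	# set the Tx ITR for all PF LAN queues
 *   # sysctl dev.ixl.0.rx_itr		# read back the current Rx ITR
 *
 * Values outside 0..IXL_MAX_ITR are rejected with EINVAL, as is any write
 * while the corresponding dynamic ITR mode is enabled.
 */
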
957 void
958 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
959 	struct sysctl_oid_list *child,
960 	struct i40e_hw_port_stats *stats)
961 {
962 	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
963 	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
964 	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
965 
966 	struct i40e_eth_stats *eth_stats = &stats->eth;
967 	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
968 
969 	struct ixl_sysctl_info ctls[] =
970 	{
971 		{&stats->crc_errors, "crc_errors", "CRC Errors"},
972 		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
973 		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
974 		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
975 		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
976 		/* Packet Reception Stats */
977 		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
978 		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
979 		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
980 		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
981 		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
982 		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
983 		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
984 		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
985 		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
986 		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
987 		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
988 		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
989 		/* Packet Transmission Stats */
990 		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
991 		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
992 		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
993 		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
994 		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
995 		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
996 		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
997 		/* Flow control */
998 		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
999 		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1000 		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1001 		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1002 		/* End */
1003 		{0,0,0}
1004 	};
1005 
1006 	struct ixl_sysctl_info *entry = ctls;
1007 	while (entry->stat != 0)
1008 	{
1009 		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1010 				CTLFLAG_RD, entry->stat,
1011 				entry->description);
1012 		entry++;
1013 	}
1014 }
1015 
1016 void
1017 ixl_set_rss_key(struct ixl_pf *pf)
1018 {
1019 	struct i40e_hw *hw = &pf->hw;
1020 	struct ixl_vsi *vsi = &pf->vsi;
1021 	device_t	dev = pf->dev;
1022 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1023 	enum i40e_status_code status;
1024 
1025 #ifdef RSS
1026 	/* Fetch the configured RSS key */
1027 	rss_getkey((uint8_t *) &rss_seed);
1028 #else
1029 	ixl_get_default_rss_key(rss_seed);
1030 #endif
1031 	/* Fill out hash function seed */
1032 	if (hw->mac.type == I40E_MAC_X722) {
1033 		struct i40e_aqc_get_set_rss_key_data key_data;
1034 		bcopy(rss_seed, &key_data, 52);	/* 40-byte standard + 12-byte extended hash key */
1035 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1036 		if (status)
1037 			device_printf(dev,
1038 			    "i40e_aq_set_rss_key status %s, error %s\n",
1039 			    i40e_stat_str(hw, status),
1040 			    i40e_aq_str(hw, hw->aq.asq_last_status));
1041 	} else {
1042 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1043 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1044 	}
1045 }
1046 
1047 /*
1048  * Configure enabled PCTYPES for RSS.
1049  */
1050 void
1051 ixl_set_rss_pctypes(struct ixl_pf *pf)
1052 {
1053 	struct i40e_hw *hw = &pf->hw;
1054 	u64		set_hena = 0, hena;
1055 
1056 #ifdef RSS
1057 	u32		rss_hash_config;
1058 
1059 	rss_hash_config = rss_gethashconfig();
1060 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1061 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1062 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1063 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1064 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1065 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1066 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1067 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1068 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1069 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1070 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1071 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1072 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1073 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1074 #else
1075 	if (hw->mac.type == I40E_MAC_X722)
1076 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1077 	else
1078 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1079 #endif
1080 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1081 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1082 	hena |= set_hena;
1083 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1084 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1085 
1086 }
1087 
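/*
 * Editor's sketch (not part of the driver, compiled out): HENA is a 64-bit
 * enable mask split across two 32-bit registers; PCTYPE n corresponds to
 * bit n of the combined value, exactly as in the read-modify-write above.
 */
#if 0
static void
example_enable_pctype(struct i40e_hw *hw, int pctype)
{
	u64 hena;

	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= ((u64)1 << pctype);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}
#endif
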
1088 /*
1089 ** Setup the PF's RSS parameters.
1090 */
1091 void
1092 ixl_config_rss(struct ixl_pf *pf)
1093 {
1094 	ixl_set_rss_key(pf);
1095 	ixl_set_rss_pctypes(pf);
1096 	ixl_set_rss_hlut(pf);
1097 }
1098 
1099 /*
1100  * In some firmware versions there is a default MAC/VLAN filter
1101  * configured which interferes with the filters managed by the driver.
1102  * Make sure it's removed.
1103  */
1104 void
1105 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1106 {
1107 	struct i40e_aqc_remove_macvlan_element_data e;
1108 
1109 	bzero(&e, sizeof(e));
1110 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1111 	e.vlan_tag = 0;
1112 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1113 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1114 
1115 	bzero(&e, sizeof(e));
1116 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1117 	e.vlan_tag = 0;
1118 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1119 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1120 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1121 }
1122 
1123 /*
1124 ** Initialize filter list and add filters that the hardware
1125 ** needs to know about.
1126 **
1127 ** Requires VSI's seid to be set before calling.
1128 */
1129 void
1130 ixl_init_filters(struct ixl_vsi *vsi)
1131 {
1132 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1133 
1134 	ixl_dbg_filter(pf, "%s: start\n", __func__);
1135 
1136 	/* Initialize mac filter list for VSI */
1137 	LIST_INIT(&vsi->ftl);
1138 	vsi->num_hw_filters = 0;
1139 
1140 	/* Receive broadcast Ethernet frames */
1141 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1142 
1143 	if (IXL_VSI_IS_VF(vsi))
1144 		return;
1145 
1146 	ixl_del_default_hw_filters(vsi);
1147 
1148 	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1149 
1150 	/*
1151 	 * Prevent Tx flow control frames from being sent out by
1152 	 * non-firmware transmitters.
1153 	 * This affects every VSI in the PF.
1154 	 */
1155 #ifndef IXL_DEBUG_FC
1156 	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1157 #else
1158 	if (pf->enable_tx_fc_filter)
1159 		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1160 #endif
1161 }
1162 
1163 void
1164 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1165 {
1166 	struct i40e_hw *hw = vsi->hw;
1167 	struct ixl_ftl_head tmp;
1168 	int cnt;
1169 
1170 	/*
1171 	 * The ixl_add_hw_filters function adds filters configured
1172 	 * in HW to a list in VSI. Move all filters to a temporary
1173 	 * list to avoid corrupting it by concatenating to itself.
1174 	 */
1175 	LIST_INIT(&tmp);
1176 	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1177 	cnt = vsi->num_hw_filters;
1178 	vsi->num_hw_filters = 0;
1179 
1180 	ixl_add_hw_filters(vsi, &tmp, cnt);
1181 
1182 	/*
1183 	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
1184 	 * will be NULL. Furthermore, the ftl of such vsi already contains
1185 	 * IXL_VLAN_ANY filter so we can skip that as well.
1186 	 */
1187 	if (hw == NULL)
1188 		return;
1189 
1190 	/* Filter could be removed if MAC address was changed */
1191 	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1192 
1193 	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1194 		return;
1195 	/*
1196 	 * VLAN HW filtering is enabled, make sure that filters
1197 	 * for all registered VLAN tags are configured
1198 	 */
1199 	ixl_add_vlan_filters(vsi, hw->mac.addr);
1200 }
1201 
1202 /*
1203  * This routine adds a MAC/VLAN filter to the software filter
1204  * list, then adds that new filter to the HW if it doesn't already
1205  * exist in the SW filter list.
1206  */
1207 void
1208 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1209 {
1210 	struct ixl_mac_filter	*f, *tmp;
1211 	struct ixl_pf		*pf;
1212 	device_t		dev;
1213 	struct ixl_ftl_head	to_add;
1214 	int			to_add_cnt;
1215 
1216 	pf = vsi->back;
1217 	dev = pf->dev;
1218 	to_add_cnt = 1;
1219 
1220 	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1221 	    MAC_FORMAT_ARGS(macaddr), vlan);
1222 
1223 	/* Does one already exist */
1224 	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1225 	if (f != NULL)
1226 		return;
1227 
1228 	LIST_INIT(&to_add);
1229 	f = ixl_new_filter(&to_add, macaddr, vlan);
1230 	if (f == NULL) {
1231 		device_printf(dev, "WARNING: no filter available!!\n");
1232 		return;
1233 	}
1234 	if (f->vlan != IXL_VLAN_ANY)
1235 		f->flags |= IXL_FILTER_VLAN;
1236 	else
1237 		vsi->num_macs++;
1238 
1239 	/*
1240 	** If this is the first vlan being registered, we need
1241 	** to remove the ANY filter that indicates we are not
1242 	** in a vlan, and replace it with a 0 (untagged) filter.
1243 	*/
1244 	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1245 		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1246 		if (tmp != NULL) {
1247 			struct ixl_ftl_head to_del;
1248 
1249 			/* Prepare new filter first to avoid removing
1250 			 * VLAN_ANY filter if allocation fails */
1251 			f = ixl_new_filter(&to_add, macaddr, 0);
1252 			if (f == NULL) {
1253 				device_printf(dev, "WARNING: no filter available!!\n");
1254 				free(LIST_FIRST(&to_add), M_IXL);
1255 				return;
1256 			}
1257 			to_add_cnt++;
1258 
1259 			LIST_REMOVE(tmp, ftle);
1260 			LIST_INIT(&to_del);
1261 			LIST_INSERT_HEAD(&to_del, tmp, ftle);
1262 			ixl_del_hw_filters(vsi, &to_del, 1);
1263 		}
1264 	}
1265 
1266 	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1267 }
1268 
1269 /**
1270  * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1271  * @vsi: pointer to VSI
1272  * @macaddr: MAC address
1273  *
1274  * Adds a MAC/VLAN filter for each VLAN configured on the interface
1275  * if there are enough HW filters. Otherwise adds a single filter
1276  * for all tagged and untagged frames to allow all configured VLANs
1277  * to receive traffic.
1278  */
1279 void
1280 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1281 {
1282 	struct ixl_ftl_head to_add;
1283 	struct ixl_mac_filter *f;
1284 	int to_add_cnt = 0;
1285 	int i, vlan = 0;
1286 
1287 	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1288 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1289 		return;
1290 	}
1291 	LIST_INIT(&to_add);
1292 
1293 	/* Add filter for untagged frames if it does not exist yet */
1294 	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1295 	if (f == NULL) {
1296 		f = ixl_new_filter(&to_add, macaddr, 0);
1297 		if (f == NULL) {
1298 			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1299 			return;
1300 		}
1301 		to_add_cnt++;
1302 	}
1303 
1304 	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1305 		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1306 		if (vlan == -1)
1307 			break;
1308 
1309 		/* Does one already exist */
1310 		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1311 		if (f != NULL)
1312 			continue;
1313 
1314 		f = ixl_new_filter(&to_add, macaddr, vlan);
1315 		if (f == NULL) {
1316 			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1317 			ixl_free_filters(&to_add);
1318 			return;
1319 		}
1320 		to_add_cnt++;
1321 	}
1322 
1323 	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1324 }
1325 
1326 void
1327 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1328 {
1329 	struct ixl_mac_filter *f, *tmp;
1330 	struct ixl_ftl_head ftl_head;
1331 	int to_del_cnt = 1;
1332 
1333 	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1334 	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1335 	    MAC_FORMAT_ARGS(macaddr), vlan);
1336 
1337 	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1338 	if (f == NULL)
1339 		return;
1340 
1341 	LIST_REMOVE(f, ftle);
1342 	LIST_INIT(&ftl_head);
1343 	LIST_INSERT_HEAD(&ftl_head, f, ftle);
1344 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1345 		vsi->num_macs--;
1346 
1347 	/* If this is not the last vlan just remove the filter */
1348 	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1349 		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1350 		return;
1351 	}
1352 
1353 	/* It's the last vlan, we need to switch back to a non-vlan filter */
1354 	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1355 	if (tmp != NULL) {
1356 		LIST_REMOVE(tmp, ftle);
1357 		LIST_INSERT_AFTER(f, tmp, ftle);
1358 		to_del_cnt++;
1359 	}
1360 	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1361 
1362 	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1363 }
1364 
1365 /**
1366  * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1367  * @vsi: VSI whose filters need to be removed
1368  * @macaddr: MAC address
1369  *
1370  * Remove all MAC/VLAN filters with a given MAC address. For multicast
1371  * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY),
1372  * so skip them to speed up processing. Those filters should be removed
1373  * using the ixl_del_filter function.
1374  */
1375 void
1376 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1377 {
1378 	struct ixl_mac_filter *f, *tmp;
1379 	struct ixl_ftl_head to_del;
1380 	int to_del_cnt = 0;
1381 
1382 	LIST_INIT(&to_del);
1383 
1384 	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1385 		if ((f->flags & IXL_FILTER_MC) != 0 ||
1386 		    !ixl_ether_is_equal(f->macaddr, macaddr))
1387 			continue;
1388 
1389 		LIST_REMOVE(f, ftle);
1390 		LIST_INSERT_HEAD(&to_del, f, ftle);
1391 		to_del_cnt++;
1392 	}
1393 
1394 	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1395 	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1396 	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1397 	if (to_del_cnt > 0)
1398 		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1399 }
1400 
1401 /*
1402 ** Find the filter with both matching mac addr and vlan id
1403 */
1404 struct ixl_mac_filter *
1405 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1406 {
1407 	struct ixl_mac_filter	*f;
1408 
1409 	LIST_FOREACH(f, headp, ftle) {
1410 		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1411 		    (f->vlan == vlan)) {
1412 			return (f);
1413 		}
1414 	}
1415 
1416 	return (NULL);
1417 }
1418 
1419 /*
1420 ** This routine takes additions to the vsi filter
1421 ** table and creates an Admin Queue call to create
1422 ** the filters in the hardware.
1423 */
1424 void
1425 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1426 {
1427 	struct i40e_aqc_add_macvlan_element_data *a, *b;
1428 	struct ixl_mac_filter	*f, *fn;
1429 	struct ixl_pf		*pf;
1430 	struct i40e_hw		*hw;
1431 	device_t		dev;
1432 	enum i40e_status_code	status;
1433 	int			j = 0;
1434 
1435 	pf = vsi->back;
1436 	dev = vsi->dev;
1437 	hw = &pf->hw;
1438 
1439 	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1440 
1441 	if (cnt < 1) {
1442 		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1443 		return;
1444 	}
1445 
1446 	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1447 	    M_IXL, M_NOWAIT | M_ZERO);
1448 	if (a == NULL) {
1449 		device_printf(dev, "add_hw_filters failed to get memory\n");
1450 		return;
1451 	}
1452 
1453 	LIST_FOREACH(f, to_add, ftle) {
1454 		b = &a[j]; // a pox on fvl long names :)
1455 		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1456 		if (f->vlan == IXL_VLAN_ANY) {
1457 			b->vlan_tag = 0;
1458 			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1459 		} else {
1460 			b->vlan_tag = f->vlan;
1461 			b->flags = 0;
1462 		}
1463 		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1464 		/* Some FW versions do not set the match method
1465 		 * when adding filters fails. Initialize it with
1466 		 * the expected error value to allow detection of
1467 		 * which filters were not added */
1468 		b->match_method = I40E_AQC_MM_ERR_NO_RES;
1469 		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1470 		    MAC_FORMAT_ARGS(f->macaddr));
1471 
1472 		if (++j == cnt)
1473 			break;
1474 	}
1475 	if (j != cnt) {
1476 		/* Something went wrong */
1477 		device_printf(dev,
1478 		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
1479 		    __func__, cnt, j);
1480 		ixl_free_filters(to_add);
1481 		goto out_free;
1482 	}
1483 
1484 	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1485 	if (status == I40E_SUCCESS) {
1486 		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1487 		vsi->num_hw_filters += j;
1488 		goto out_free;
1489 	}
1490 
1491 	device_printf(dev,
1492 	    "i40e_aq_add_macvlan status %s, error %s\n",
1493 	    i40e_stat_str(hw, status),
1494 	    i40e_aq_str(hw, hw->aq.asq_last_status));
1495 	j = 0;
1496 
1497 	/* Verify which filters were actually configured in HW
1498 	 * and add them to the list */
1499 	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1500 		LIST_REMOVE(f, ftle);
1501 		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1502 			ixl_dbg_filter(pf,
1503 			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1504 			    __func__,
1505 			    MAC_FORMAT_ARGS(f->macaddr),
1506 			    f->vlan);
1507 			free(f, M_IXL);
1508 		} else {
1509 			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1510 			vsi->num_hw_filters++;
1511 		}
1512 		j++;
1513 	}
1514 
1515 out_free:
1516 	free(a, M_IXL);
1517 }
1518 
1519 /*
1520 ** This routine takes removals in the vsi filter
1521 ** table and creates an Admin Queue call to delete
1522 ** the filters in the hardware.
1523 */
1524 void
1525 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1526 {
1527 	struct i40e_aqc_remove_macvlan_element_data *d, *e;
1528 	struct ixl_pf		*pf;
1529 	struct i40e_hw		*hw;
1530 	device_t		dev;
1531 	struct ixl_mac_filter	*f, *f_temp;
1532 	enum i40e_status_code	status;
1533 	int			j = 0;
1534 
1535 	pf = vsi->back;
1536 	hw = &pf->hw;
1537 	dev = vsi->dev;
1538 
1539 	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1540 
1541 	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1542 	    M_IXL, M_NOWAIT | M_ZERO);
1543 	if (d == NULL) {
1544 		device_printf(dev, "%s: failed to get memory\n", __func__);
1545 		return;
1546 	}
1547 
1548 	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1549 		e = &d[j]; // a pox on fvl long names :)
1550 		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1551 		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1552 		if (f->vlan == IXL_VLAN_ANY) {
1553 			e->vlan_tag = 0;
1554 			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1555 		} else {
1556 			e->vlan_tag = f->vlan;
1557 		}
1558 
1559 		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1560 		    MAC_FORMAT_ARGS(f->macaddr));
1561 
1562 		/* delete entry from the list */
1563 		LIST_REMOVE(f, ftle);
1564 		free(f, M_IXL);
1565 		if (++j == cnt)
1566 			break;
1567 	}
1568 	if (j != cnt || !LIST_EMPTY(to_del)) {
1569 		/* Something went wrong */
1570 		device_printf(dev,
1571 		    "%s ERROR: filter list has wrong size; expected: %d, found: %d\n",
1572 		    __func__, cnt, j);
1573 		ixl_free_filters(to_del);
1574 		goto out_free;
1575 	}
1576 	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1577 	if (status) {
1578 		device_printf(dev,
1579 		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1580 		    __func__, i40e_stat_str(hw, status),
1581 		    i40e_aq_str(hw, hw->aq.asq_last_status));
1582 		for (int i = 0; i < j; i++) {
1583 			if (d[i].error_code == 0)
1584 				continue;
1585 			device_printf(dev,
1586 			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1587 			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1588 			    d[i].vlan_tag);
1589 		}
1590 	}
1591 
1592 	vsi->num_hw_filters -= j;
1593 
1594 out_free:
1595 	free(d, M_IXL);
1596 
1597 	ixl_dbg_filter(pf, "%s: end\n", __func__);
1598 }
1599 
1600 int
1601 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1602 {
1603 	struct i40e_hw	*hw = &pf->hw;
1604 	int		error = 0;
1605 	u32		reg;
1606 	u16		pf_qidx;
1607 
1608 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1609 
1610 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1611 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1612 	    pf_qidx, vsi_qidx);
1613 
1614 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1615 
1616 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1617 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1618 	    I40E_QTX_ENA_QENA_STAT_MASK;
1619 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1620 	/* Verify the enable took */
1621 	for (int j = 0; j < 10; j++) {
1622 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1623 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1624 			break;
1625 		i40e_usec_delay(10);
1626 	}
1627 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1628 		device_printf(pf->dev, "TX queue %d still disabled!\n",
1629 		    pf_qidx);
1630 		error = ETIMEDOUT;
1631 	}
1632 
1633 	return (error);
1634 }
1635 
1636 int
1637 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1638 {
1639 	struct i40e_hw	*hw = &pf->hw;
1640 	int		error = 0;
1641 	u32		reg;
1642 	u16		pf_qidx;
1643 
1644 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1645 
1646 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1647 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1648 	    pf_qidx, vsi_qidx);
1649 
1650 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1651 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1652 	    I40E_QRX_ENA_QENA_STAT_MASK;
1653 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1654 	/* Verify the enable took */
1655 	for (int j = 0; j < 10; j++) {
1656 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1657 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1658 			break;
1659 		i40e_usec_delay(10);
1660 	}
1661 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1662 		device_printf(pf->dev, "RX queue %d still disabled!\n",
1663 		    pf_qidx);
1664 		error = ETIMEDOUT;
1665 	}
1666 
1667 	return (error);
1668 }
1669 
1670 int
1671 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1672 {
1673 	int error = 0;
1674 
1675 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1676 	/* Called function already prints error message */
1677 	if (error)
1678 		return (error);
1679 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1680 	return (error);
1681 }
1682 
1683 /*
1684  * Returns error on first ring that is detected hung.
1685  */
1686 int
1687 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1688 {
1689 	struct i40e_hw	*hw = &pf->hw;
1690 	int		error = 0;
1691 	u32		reg;
1692 	u16		pf_qidx;
1693 
1694 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1695 
1696 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1697 	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1698 	    pf_qidx, vsi_qidx);
1699 
1700 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1701 	i40e_usec_delay(500);
1702 
1703 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1704 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1705 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1706 	/* Verify the disable took */
1707 	for (int j = 0; j < 10; j++) {
1708 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1709 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1710 			break;
1711 		i40e_msec_delay(10);
1712 	}
1713 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1714 		device_printf(pf->dev, "TX queue %d still enabled!\n",
1715 		    pf_qidx);
1716 		error = ETIMEDOUT;
1717 	}
1718 
1719 	return (error);
1720 }
1721 
1722 /*
1723  * Returns an error if the ring fails to disable (i.e. is detected hung).
1724  */
1725 int
1726 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1727 {
1728 	struct i40e_hw	*hw = &pf->hw;
1729 	int		error = 0;
1730 	u32		reg;
1731 	u16		pf_qidx;
1732 
1733 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1734 
1735 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1736 	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1737 	    pf_qidx, vsi_qidx);
1738 
1739 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1740 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1741 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1742 	/* Verify the disable took */
1743 	for (int j = 0; j < 10; j++) {
1744 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1745 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1746 			break;
1747 		i40e_msec_delay(10);
1748 	}
1749 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1750 		device_printf(pf->dev, "RX queue %d still enabled!\n",
1751 		    pf_qidx);
1752 		error = ETIMEDOUT;
1753 	}
1754 
1755 	return (error);
1756 }
1757 
1758 int
1759 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1760 {
1761 	int error = 0;
1762 
1763 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1764 	/* Called function already prints error message */
1765 	if (error)
1766 		return (error);
1767 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1768 	return (error);
1769 }
1770 
1771 static void
1772 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1773 {
1774 	struct i40e_hw *hw = &pf->hw;
1775 	device_t dev = pf->dev;
1776 	struct ixl_vf *vf;
1777 	bool mdd_detected = false;
1778 	bool pf_mdd_detected = false;
1779 	bool vf_mdd_detected = false;
1780 	u16 vf_num, queue;
1781 	u8 pf_num, event;
1782 	u8 pf_mdet_num, vp_mdet_num;
1783 	u32 reg;
1784 
1785 	/* find what triggered the MDD event */
1786 	reg = rd32(hw, I40E_GL_MDET_TX);
1787 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1788 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1789 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
1790 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1791 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
1792 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1793 		    I40E_GL_MDET_TX_EVENT_SHIFT;
1794 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1795 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
1796 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1797 		mdd_detected = true;
1798 	}
1799 
1800 	if (!mdd_detected)
1801 		return;
1802 
1803 	reg = rd32(hw, I40E_PF_MDET_TX);
1804 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1805 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1806 		pf_mdet_num = hw->pf_id;
1807 		pf_mdd_detected = true;
1808 	}
1809 
1810 	/* Check if MDD was caused by a VF */
1811 	for (int i = 0; i < pf->num_vfs; i++) {
1812 		vf = &(pf->vfs[i]);
1813 		reg = rd32(hw, I40E_VP_MDET_TX(i));
1814 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1815 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1816 			vp_mdet_num = i;
1817 			vf->num_mdd_events++;
1818 			vf_mdd_detected = true;
1819 		}
1820 	}
1821 
1822 	/* Print out an error message */
1823 	if (vf_mdd_detected && pf_mdd_detected)
1824 		device_printf(dev,
1825 		    "Malicious Driver Detection event %d"
1826 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1827 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1828 	else if (vf_mdd_detected && !pf_mdd_detected)
1829 		device_printf(dev,
1830 		    "Malicious Driver Detection event %d"
1831 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1832 		    event, queue, pf_num, vf_num, vp_mdet_num);
1833 	else if (!vf_mdd_detected && pf_mdd_detected)
1834 		device_printf(dev,
1835 		    "Malicious Driver Detection event %d"
1836 		    " on TX queue %d, pf number %d (PF-%d)\n",
1837 		    event, queue, pf_num, pf_mdet_num);
1838 	/* Theoretically shouldn't happen */
1839 	else
1840 		device_printf(dev,
1841 		    "TX Malicious Driver Detection event (unknown)\n");
1842 }
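
/*
 * The mask-and-shift reads above follow the register field convention used
 * throughout this file; a hypothetical helper (not part of the driver)
 * capturing the pattern would be:
 *
 *	#define IXL_RD_FIELD(reg, field) \
 *		(((reg) & field##_MASK) >> field##_SHIFT)
 *
 * e.g. queue = IXL_RD_FIELD(reg, I40E_GL_MDET_TX_QUEUE);
 */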
1843 
1844 static void
1845 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1846 {
1847 	struct i40e_hw *hw = &pf->hw;
1848 	device_t dev = pf->dev;
1849 	struct ixl_vf *vf;
1850 	bool mdd_detected = false;
1851 	bool pf_mdd_detected = false;
1852 	bool vf_mdd_detected = false;
1853 	u16 queue;
1854 	u8 pf_num, event;
1855 	u8 pf_mdet_num, vp_mdet_num;
1856 	u32 reg;
1857 
1858 	/*
1859 	 * GL_MDET_RX doesn't contain VF number information, unlike
1860 	 * GL_MDET_TX.
1861 	 */
1862 	reg = rd32(hw, I40E_GL_MDET_RX);
1863 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1864 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1865 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
1866 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1867 		    I40E_GL_MDET_RX_EVENT_SHIFT;
1868 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1869 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
1870 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1871 		mdd_detected = true;
1872 	}
1873 
1874 	if (!mdd_detected)
1875 		return;
1876 
1877 	reg = rd32(hw, I40E_PF_MDET_RX);
1878 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1879 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1880 		pf_mdet_num = hw->pf_id;
1881 		pf_mdd_detected = true;
1882 	}
1883 
1884 	/* Check if MDD was caused by a VF */
1885 	for (int i = 0; i < pf->num_vfs; i++) {
1886 		vf = &(pf->vfs[i]);
1887 		reg = rd32(hw, I40E_VP_MDET_RX(i));
1888 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1889 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1890 			vp_mdet_num = i;
1891 			vf->num_mdd_events++;
1892 			vf_mdd_detected = true;
1893 		}
1894 	}
1895 
1896 	/* Print out an error message */
1897 	if (vf_mdd_detected && pf_mdd_detected)
1898 		device_printf(dev,
1899 		    "Malicious Driver Detection event %d"
1900 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1901 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1902 	else if (vf_mdd_detected && !pf_mdd_detected)
1903 		device_printf(dev,
1904 		    "Malicious Driver Detection event %d"
1905 		    " on RX queue %d, pf number %d, (VF-%d)\n",
1906 		    event, queue, pf_num, vp_mdet_num);
1907 	else if (!vf_mdd_detected && pf_mdd_detected)
1908 		device_printf(dev,
1909 		    "Malicious Driver Detection event %d"
1910 		    " on RX queue %d, pf number %d (PF-%d)\n",
1911 		    event, queue, pf_num, pf_mdet_num);
1912 	/* Theoretically shouldn't happen */
1913 	else
1914 		device_printf(dev,
1915 		    "RX Malicious Driver Detection event (unknown)\n");
1916 }
1917 
1918 /**
1919  * ixl_handle_mdd_event
1920  *
1921  * Called from interrupt handler to identify possibly malicious vfs
1922  * (But also detects events from the PF, as well)
1923  **/
1924 void
1925 ixl_handle_mdd_event(struct ixl_pf *pf)
1926 {
1927 	struct i40e_hw *hw = &pf->hw;
1928 	u32 reg;
1929 
1930 	/*
1931 	 * Handle both TX/RX because it's possible they could
1932 	 * both trigger in the same interrupt.
1933 	 */
1934 	ixl_handle_tx_mdd_event(pf);
1935 	ixl_handle_rx_mdd_event(pf);
1936 
1937 	ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
1938 
1939 	/* re-enable mdd interrupt cause */
1940 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1941 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1942 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1943 	ixl_flush(hw);
1944 }
1945 
1946 void
1947 ixl_enable_intr0(struct i40e_hw *hw)
1948 {
1949 	u32		reg;
1950 
1951 	/* Use IXL_ITR_NONE so ITR isn't updated here */
1952 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1953 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1954 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1955 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1956 }
1957 
1958 void
1959 ixl_disable_intr0(struct i40e_hw *hw)
1960 {
1961 	u32		reg;
1962 
1963 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1964 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1965 	ixl_flush(hw);
1966 }
1967 
1968 void
1969 ixl_enable_queue(struct i40e_hw *hw, int id)
1970 {
1971 	u32		reg;
1972 
1973 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1974 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1975 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1976 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1977 }
1978 
1979 void
1980 ixl_disable_queue(struct i40e_hw *hw, int id)
1981 {
1982 	u32		reg;
1983 
1984 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1985 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1986 }
1987 
1988 void
1989 ixl_handle_empr_reset(struct ixl_pf *pf)
1990 {
1991 	struct ixl_vsi	*vsi = &pf->vsi;
1992 	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
1993 
1994 	ixl_prepare_for_reset(pf, is_up);
1995 	/*
1996 	 * i40e_pf_reset checks the type of reset and acts
1997 	 * accordingly. If an EMP or Core reset was performed,
1998 	 * doing a PF reset is not necessary and it sometimes
1999 	 * fails.
2000 	 */
2001 	ixl_pf_reset(pf);
2002 
2003 	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2004 	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2005 		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2006 		device_printf(pf->dev,
2007 		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2008 		pf->link_up = FALSE;
2009 		ixl_update_link_status(pf);
2010 	}
2011 
2012 	ixl_rebuild_hw_structs_after_reset(pf, is_up);
2013 
2014 	ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
2015 }
2016 
2017 void
2018 ixl_update_stats_counters(struct ixl_pf *pf)
2019 {
2020 	struct i40e_hw	*hw = &pf->hw;
2021 	struct ixl_vsi	*vsi = &pf->vsi;
2022 	struct ixl_vf	*vf;
2023 	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2024 
2025 	struct i40e_hw_port_stats *nsd = &pf->stats;
2026 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2027 
2028 	/* Update hw stats */
2029 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2030 			   pf->stat_offsets_loaded,
2031 			   &osd->crc_errors, &nsd->crc_errors);
2032 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2033 			   pf->stat_offsets_loaded,
2034 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2035 	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2036 			   I40E_GLPRT_GORCL(hw->port),
2037 			   pf->stat_offsets_loaded,
2038 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2039 	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2040 			   I40E_GLPRT_GOTCL(hw->port),
2041 			   pf->stat_offsets_loaded,
2042 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2043 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2044 			   pf->stat_offsets_loaded,
2045 			   &osd->eth.rx_discards,
2046 			   &nsd->eth.rx_discards);
2047 	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2048 			   I40E_GLPRT_UPRCL(hw->port),
2049 			   pf->stat_offsets_loaded,
2050 			   &osd->eth.rx_unicast,
2051 			   &nsd->eth.rx_unicast);
2052 	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2053 			   I40E_GLPRT_UPTCL(hw->port),
2054 			   pf->stat_offsets_loaded,
2055 			   &osd->eth.tx_unicast,
2056 			   &nsd->eth.tx_unicast);
2057 	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2058 			   I40E_GLPRT_MPRCL(hw->port),
2059 			   pf->stat_offsets_loaded,
2060 			   &osd->eth.rx_multicast,
2061 			   &nsd->eth.rx_multicast);
2062 	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2063 			   I40E_GLPRT_MPTCL(hw->port),
2064 			   pf->stat_offsets_loaded,
2065 			   &osd->eth.tx_multicast,
2066 			   &nsd->eth.tx_multicast);
2067 	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2068 			   I40E_GLPRT_BPRCL(hw->port),
2069 			   pf->stat_offsets_loaded,
2070 			   &osd->eth.rx_broadcast,
2071 			   &nsd->eth.rx_broadcast);
2072 	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2073 			   I40E_GLPRT_BPTCL(hw->port),
2074 			   pf->stat_offsets_loaded,
2075 			   &osd->eth.tx_broadcast,
2076 			   &nsd->eth.tx_broadcast);
2077 
2078 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2079 			   pf->stat_offsets_loaded,
2080 			   &osd->tx_dropped_link_down,
2081 			   &nsd->tx_dropped_link_down);
2082 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2083 			   pf->stat_offsets_loaded,
2084 			   &osd->mac_local_faults,
2085 			   &nsd->mac_local_faults);
2086 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2087 			   pf->stat_offsets_loaded,
2088 			   &osd->mac_remote_faults,
2089 			   &nsd->mac_remote_faults);
2090 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2091 			   pf->stat_offsets_loaded,
2092 			   &osd->rx_length_errors,
2093 			   &nsd->rx_length_errors);
2094 
2095 	/* Flow control (LFC) stats */
2096 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2097 			   pf->stat_offsets_loaded,
2098 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2099 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2100 			   pf->stat_offsets_loaded,
2101 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2102 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2103 			   pf->stat_offsets_loaded,
2104 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2105 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2106 			   pf->stat_offsets_loaded,
2107 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2108 
2109 	/*
2110 	 * For watchdog management we need to know if we have been paused
2111 	 * during the last interval, so capture that here.
2112 	 */
2113 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2114 		vsi->shared->isc_pause_frames = 1;
2115 
2116 	/* Packet size stats rx */
2117 	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2118 			   I40E_GLPRT_PRC64L(hw->port),
2119 			   pf->stat_offsets_loaded,
2120 			   &osd->rx_size_64, &nsd->rx_size_64);
2121 	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2122 			   I40E_GLPRT_PRC127L(hw->port),
2123 			   pf->stat_offsets_loaded,
2124 			   &osd->rx_size_127, &nsd->rx_size_127);
2125 	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2126 			   I40E_GLPRT_PRC255L(hw->port),
2127 			   pf->stat_offsets_loaded,
2128 			   &osd->rx_size_255, &nsd->rx_size_255);
2129 	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2130 			   I40E_GLPRT_PRC511L(hw->port),
2131 			   pf->stat_offsets_loaded,
2132 			   &osd->rx_size_511, &nsd->rx_size_511);
2133 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2134 			   I40E_GLPRT_PRC1023L(hw->port),
2135 			   pf->stat_offsets_loaded,
2136 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2137 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2138 			   I40E_GLPRT_PRC1522L(hw->port),
2139 			   pf->stat_offsets_loaded,
2140 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2141 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2142 			   I40E_GLPRT_PRC9522L(hw->port),
2143 			   pf->stat_offsets_loaded,
2144 			   &osd->rx_size_big, &nsd->rx_size_big);
2145 
2146 	/* Packet size stats tx */
2147 	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2148 			   I40E_GLPRT_PTC64L(hw->port),
2149 			   pf->stat_offsets_loaded,
2150 			   &osd->tx_size_64, &nsd->tx_size_64);
2151 	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2152 			   I40E_GLPRT_PTC127L(hw->port),
2153 			   pf->stat_offsets_loaded,
2154 			   &osd->tx_size_127, &nsd->tx_size_127);
2155 	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2156 			   I40E_GLPRT_PTC255L(hw->port),
2157 			   pf->stat_offsets_loaded,
2158 			   &osd->tx_size_255, &nsd->tx_size_255);
2159 	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2160 			   I40E_GLPRT_PTC511L(hw->port),
2161 			   pf->stat_offsets_loaded,
2162 			   &osd->tx_size_511, &nsd->tx_size_511);
2163 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2164 			   I40E_GLPRT_PTC1023L(hw->port),
2165 			   pf->stat_offsets_loaded,
2166 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2167 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2168 			   I40E_GLPRT_PTC1522L(hw->port),
2169 			   pf->stat_offsets_loaded,
2170 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2171 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2172 			   I40E_GLPRT_PTC9522L(hw->port),
2173 			   pf->stat_offsets_loaded,
2174 			   &osd->tx_size_big, &nsd->tx_size_big);
2175 
2176 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2177 			   pf->stat_offsets_loaded,
2178 			   &osd->rx_undersize, &nsd->rx_undersize);
2179 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2180 			   pf->stat_offsets_loaded,
2181 			   &osd->rx_fragments, &nsd->rx_fragments);
2182 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2183 			   pf->stat_offsets_loaded,
2184 			   &osd->rx_oversize, &nsd->rx_oversize);
2185 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2186 			   pf->stat_offsets_loaded,
2187 			   &osd->rx_jabber, &nsd->rx_jabber);
2188 	/* EEE */
2189 	i40e_get_phy_lpi_status(hw, nsd);
2190 
2191 	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2192 			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
2193 			  &osd->rx_lpi_count, &nsd->rx_lpi_count);
2194 
2195 	pf->stat_offsets_loaded = true;
2196 	/* End hw stats */
2197 
2198 	/* Update vsi stats */
2199 	ixl_update_vsi_stats(vsi);
2200 
2201 	for (int i = 0; i < pf->num_vfs; i++) {
2202 		vf = &pf->vfs[i];
2203 		if (vf->vf_flags & VF_FLAG_ENABLED)
2204 			ixl_update_eth_stats(&vf->vsi);
2205 	}
2206 }
2207 
2208 /**
2209  * Update VSI-specific ethernet statistics counters.
2210  **/
2211 void
2212 ixl_update_eth_stats(struct ixl_vsi *vsi)
2213 {
2214 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2215 	struct i40e_hw *hw = &pf->hw;
2216 	struct i40e_eth_stats *es;
2217 	struct i40e_eth_stats *oes;
2218 	u16 stat_idx = vsi->info.stat_counter_idx;
2219 
2220 	es = &vsi->eth_stats;
2221 	oes = &vsi->eth_stats_offsets;
2222 
2223 	/* Gather up the stats that the hw collects */
2224 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2225 			   vsi->stat_offsets_loaded,
2226 			   &oes->tx_errors, &es->tx_errors);
2227 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2228 			   vsi->stat_offsets_loaded,
2229 			   &oes->rx_discards, &es->rx_discards);
2230 
2231 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2232 			   I40E_GLV_GORCL(stat_idx),
2233 			   vsi->stat_offsets_loaded,
2234 			   &oes->rx_bytes, &es->rx_bytes);
2235 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2236 			   I40E_GLV_UPRCL(stat_idx),
2237 			   vsi->stat_offsets_loaded,
2238 			   &oes->rx_unicast, &es->rx_unicast);
2239 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2240 			   I40E_GLV_MPRCL(stat_idx),
2241 			   vsi->stat_offsets_loaded,
2242 			   &oes->rx_multicast, &es->rx_multicast);
2243 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2244 			   I40E_GLV_BPRCL(stat_idx),
2245 			   vsi->stat_offsets_loaded,
2246 			   &oes->rx_broadcast, &es->rx_broadcast);
2247 
2248 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2249 			   I40E_GLV_GOTCL(stat_idx),
2250 			   vsi->stat_offsets_loaded,
2251 			   &oes->tx_bytes, &es->tx_bytes);
2252 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2253 			   I40E_GLV_UPTCL(stat_idx),
2254 			   vsi->stat_offsets_loaded,
2255 			   &oes->tx_unicast, &es->tx_unicast);
2256 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2257 			   I40E_GLV_MPTCL(stat_idx),
2258 			   vsi->stat_offsets_loaded,
2259 			   &oes->tx_multicast, &es->tx_multicast);
2260 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2261 			   I40E_GLV_BPTCL(stat_idx),
2262 			   vsi->stat_offsets_loaded,
2263 			   &oes->tx_broadcast, &es->tx_broadcast);
2264 	vsi->stat_offsets_loaded = true;
2265 }
2266 
2267 void
2268 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2269 {
2270 	struct ixl_pf		*pf;
2271 	struct i40e_eth_stats	*es;
2272 	u64			tx_discards, csum_errs;
2273 
2274 	struct i40e_hw_port_stats *nsd;
2275 
2276 	pf = vsi->back;
2277 	es = &vsi->eth_stats;
2278 	nsd = &pf->stats;
2279 
2280 	ixl_update_eth_stats(vsi);
2281 
2282 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2283 
2284 	csum_errs = 0;
2285 	for (int i = 0; i < vsi->num_rx_queues; i++)
2286 		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2287 	nsd->checksum_error = csum_errs;
2288 
2289 	/* Update ifnet stats */
2290 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2291 	                   es->rx_multicast +
2292 			   es->rx_broadcast);
2293 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2294 	                   es->tx_multicast +
2295 			   es->tx_broadcast);
2296 	IXL_SET_IBYTES(vsi, es->rx_bytes);
2297 	IXL_SET_OBYTES(vsi, es->tx_bytes);
2298 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2299 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2300 
2301 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2302 	    nsd->checksum_error + nsd->rx_length_errors +
2303 	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2304 	    nsd->rx_jabber);
2305 	IXL_SET_OERRORS(vsi, es->tx_errors);
2306 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2307 	IXL_SET_OQDROPS(vsi, tx_discards);
2308 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2309 	IXL_SET_COLLISIONS(vsi, 0);
2310 }
2311 
2312 /**
2313  * Reset all of the stats for the given pf
2314  **/
2315 void
2316 ixl_pf_reset_stats(struct ixl_pf *pf)
2317 {
2318 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2319 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2320 	pf->stat_offsets_loaded = false;
2321 }
2322 
2323 /**
2324  * Resets all stats of the given vsi
2325  **/
2326 void
2327 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2328 {
2329 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2330 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2331 	vsi->stat_offsets_loaded = false;
2332 }
2333 
2334 /**
2335  * Read and update a 48 bit stat from the hw
2336  *
2337  * Since the device stats are not reset at PFReset, they likely will not
2338  * be zeroed when the driver starts.  We'll save the first values read
2339  * and use them as offsets to be subtracted from the raw values in order
2340  * to report stats that count from zero.
2341  **/
2342 void
2343 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2344 	bool offset_loaded, u64 *offset, u64 *stat)
2345 {
2346 	u64 new_data;
2347 
2348 	new_data = rd64(hw, loreg);
2349 
2350 	if (!offset_loaded)
2351 		*offset = new_data;
2352 	if (new_data >= *offset)
2353 		*stat = new_data - *offset;
2354 	else
2355 		*stat = (new_data + ((u64)1 << 48)) - *offset;
2356 	*stat &= 0xFFFFFFFFFFFFULL;
2357 }
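
/*
 * Worked example of the 48-bit rollover handling above: with an offset of
 * 0xFFFFFFFFFF00 saved at load time, a later raw read of 0x000000000100
 * means the counter wrapped, so the reported value is
 * (0x100 + (1ULL << 48) - 0xFFFFFFFFFF00) & 0xFFFFFFFFFFFF == 0x200.
 */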
2358 
2359 /**
2360  * Read and update a 32 bit stat from the hw
2361  **/
2362 void
2363 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2364 	bool offset_loaded, u64 *offset, u64 *stat)
2365 {
2366 	u32 new_data;
2367 
2368 	new_data = rd32(hw, reg);
2369 	if (!offset_loaded)
2370 		*offset = new_data;
2371 	if (new_data >= *offset)
2372 		*stat = (u32)(new_data - *offset);
2373 	else
2374 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2375 }
2376 
2377 /**
2378  * Add subset of device sysctls safe to use in recovery mode
2379  */
2380 void
2381 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2382 {
2383 	device_t dev = pf->dev;
2384 
2385 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2386 	struct sysctl_oid_list *ctx_list =
2387 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2388 
2389 	struct sysctl_oid *debug_node;
2390 	struct sysctl_oid_list *debug_list;
2391 
2392 	SYSCTL_ADD_PROC(ctx, ctx_list,
2393 	    OID_AUTO, "fw_version",
2394 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2395 	    ixl_sysctl_show_fw, "A", "Firmware version");
2396 
2397 	/* Add sysctls meant to print debug information, but don't list them
2398 	 * in "sysctl -a" output. */
2399 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2400 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2401 	    "Debug Sysctls");
2402 	debug_list = SYSCTL_CHILDREN(debug_node);
2403 
2404 	SYSCTL_ADD_UINT(ctx, debug_list,
2405 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2406 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2407 
2408 	SYSCTL_ADD_UINT(ctx, debug_list,
2409 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2410 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2411 
2412 	SYSCTL_ADD_PROC(ctx, debug_list,
2413 	    OID_AUTO, "dump_debug_data",
2414 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2415 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2416 
2417 	SYSCTL_ADD_PROC(ctx, debug_list,
2418 	    OID_AUTO, "do_pf_reset",
2419 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2420 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2421 
2422 	SYSCTL_ADD_PROC(ctx, debug_list,
2423 	    OID_AUTO, "do_core_reset",
2424 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2425 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2426 
2427 	SYSCTL_ADD_PROC(ctx, debug_list,
2428 	    OID_AUTO, "do_global_reset",
2429 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2430 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2431 
2432 	SYSCTL_ADD_PROC(ctx, debug_list,
2433 	    OID_AUTO, "queue_interrupt_table",
2434 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2435 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2436 }
2437 
2438 void
2439 ixl_add_device_sysctls(struct ixl_pf *pf)
2440 {
2441 	device_t dev = pf->dev;
2442 	struct i40e_hw *hw = &pf->hw;
2443 
2444 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2445 	struct sysctl_oid_list *ctx_list =
2446 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2447 
2448 	struct sysctl_oid *debug_node;
2449 	struct sysctl_oid_list *debug_list;
2450 
2451 	struct sysctl_oid *fec_node;
2452 	struct sysctl_oid_list *fec_list;
2453 	struct sysctl_oid *eee_node;
2454 	struct sysctl_oid_list *eee_list;
2455 
2456 	/* Set up sysctls */
2457 	SYSCTL_ADD_PROC(ctx, ctx_list,
2458 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2459 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2460 
2461 	SYSCTL_ADD_PROC(ctx, ctx_list,
2462 	    OID_AUTO, "advertise_speed",
2463 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2464 	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2465 
2466 	SYSCTL_ADD_PROC(ctx, ctx_list,
2467 	    OID_AUTO, "supported_speeds",
2468 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2469 	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2470 
2471 	SYSCTL_ADD_PROC(ctx, ctx_list,
2472 	    OID_AUTO, "current_speed",
2473 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2474 	    ixl_sysctl_current_speed, "A", "Current Port Speed");
2475 
2476 	SYSCTL_ADD_PROC(ctx, ctx_list,
2477 	    OID_AUTO, "fw_version",
2478 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2479 	    ixl_sysctl_show_fw, "A", "Firmware version");
2480 
2481 	SYSCTL_ADD_PROC(ctx, ctx_list,
2482 	    OID_AUTO, "unallocated_queues",
2483 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2484 	    ixl_sysctl_unallocated_queues, "I",
2485 	    "Queues not allocated to a PF or VF");
2486 
2487 	SYSCTL_ADD_PROC(ctx, ctx_list,
2488 	    OID_AUTO, "tx_itr",
2489 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2490 	    ixl_sysctl_pf_tx_itr, "I",
2491 	    "Immediately set TX ITR value for all queues");
2492 
2493 	SYSCTL_ADD_PROC(ctx, ctx_list,
2494 	    OID_AUTO, "rx_itr",
2495 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2496 	    ixl_sysctl_pf_rx_itr, "I",
2497 	    "Immediately set RX ITR value for all queues");
2498 
2499 	SYSCTL_ADD_INT(ctx, ctx_list,
2500 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2501 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2502 
2503 	SYSCTL_ADD_INT(ctx, ctx_list,
2504 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2505 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2506 
2507 	/* Add FEC sysctls for 25G adapters */
2508 	if (i40e_is_25G_device(hw->device_id)) {
2509 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2510 		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2511 		    "FEC Sysctls");
2512 		fec_list = SYSCTL_CHILDREN(fec_node);
2513 
2514 		SYSCTL_ADD_PROC(ctx, fec_list,
2515 		    OID_AUTO, "fc_ability",
2516 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2517 		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2518 
2519 		SYSCTL_ADD_PROC(ctx, fec_list,
2520 		    OID_AUTO, "rs_ability",
2521 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2522 		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2523 
2524 		SYSCTL_ADD_PROC(ctx, fec_list,
2525 		    OID_AUTO, "fc_requested",
2526 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2527 		    ixl_sysctl_fec_fc_request, "I",
2528 		    "FC FEC mode requested on link");
2529 
2530 		SYSCTL_ADD_PROC(ctx, fec_list,
2531 		    OID_AUTO, "rs_requested",
2532 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2533 		    ixl_sysctl_fec_rs_request, "I",
2534 		    "RS FEC mode requested on link");
2535 
2536 		SYSCTL_ADD_PROC(ctx, fec_list,
2537 		    OID_AUTO, "auto_fec_enabled",
2538 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2539 		    ixl_sysctl_fec_auto_enable, "I",
2540 		    "Let FW decide FEC ability/request modes");
2541 	}
2542 
2543 	SYSCTL_ADD_PROC(ctx, ctx_list,
2544 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2545 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2546 
2547 	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2548 	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2549 	    "Energy Efficient Ethernet (EEE) Sysctls");
2550 	eee_list = SYSCTL_CHILDREN(eee_node);
2551 
2552 	SYSCTL_ADD_PROC(ctx, eee_list,
2553 	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2554 	    pf, 0, ixl_sysctl_eee_enable, "I",
2555 	    "Enable Energy Efficient Ethernet (EEE)");
2556 
2557 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2558 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2559 	    "TX LPI status");
2560 
2561 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2562 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2563 	    "RX LPI status");
2564 
2565 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2566 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2567 	    "TX LPI count");
2568 
2569 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2570 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2571 	    "RX LPI count");
2572 
2573 	SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2574 	    "link_active_on_if_down",
2575 	    CTLTYPE_INT | CTLFLAG_RWTUN,
2576 	    pf, 0, ixl_sysctl_set_link_active, "I",
2577 	    IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2578 
2579 	/* Add sysctls meant to print debug information, but don't list them
2580 	 * in "sysctl -a" output. */
2581 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2582 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2583 	    "Debug Sysctls");
2584 	debug_list = SYSCTL_CHILDREN(debug_node);
2585 
2586 	SYSCTL_ADD_UINT(ctx, debug_list,
2587 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2588 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2589 
2590 	SYSCTL_ADD_UINT(ctx, debug_list,
2591 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2592 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2593 
2594 	SYSCTL_ADD_PROC(ctx, debug_list,
2595 	    OID_AUTO, "link_status",
2596 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2597 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2598 
2599 	SYSCTL_ADD_PROC(ctx, debug_list,
2600 	    OID_AUTO, "phy_abilities_init",
2601 	    CTLTYPE_STRING | CTLFLAG_RD,
2602 	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2603 
2604 	SYSCTL_ADD_PROC(ctx, debug_list,
2605 	    OID_AUTO, "phy_abilities",
2606 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2607 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2608 
2609 	SYSCTL_ADD_PROC(ctx, debug_list,
2610 	    OID_AUTO, "filter_list",
2611 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2612 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2613 
2614 	SYSCTL_ADD_PROC(ctx, debug_list,
2615 	    OID_AUTO, "hw_res_alloc",
2616 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2617 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2618 
2619 	SYSCTL_ADD_PROC(ctx, debug_list,
2620 	    OID_AUTO, "switch_config",
2621 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2622 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2623 
2624 	SYSCTL_ADD_PROC(ctx, debug_list,
2625 	    OID_AUTO, "switch_vlans",
2626 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2627 	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2628 
2629 	SYSCTL_ADD_PROC(ctx, debug_list,
2630 	    OID_AUTO, "rss_key",
2631 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2632 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2633 
2634 	SYSCTL_ADD_PROC(ctx, debug_list,
2635 	    OID_AUTO, "rss_lut",
2636 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2637 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2638 
2639 	SYSCTL_ADD_PROC(ctx, debug_list,
2640 	    OID_AUTO, "rss_hena",
2641 	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2642 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2643 
2644 	SYSCTL_ADD_PROC(ctx, debug_list,
2645 	    OID_AUTO, "disable_fw_link_management",
2646 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2647 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2648 
2649 	SYSCTL_ADD_PROC(ctx, debug_list,
2650 	    OID_AUTO, "dump_debug_data",
2651 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2652 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2653 
2654 	SYSCTL_ADD_PROC(ctx, debug_list,
2655 	    OID_AUTO, "do_pf_reset",
2656 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2657 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2658 
2659 	SYSCTL_ADD_PROC(ctx, debug_list,
2660 	    OID_AUTO, "do_core_reset",
2661 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2662 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2663 
2664 	SYSCTL_ADD_PROC(ctx, debug_list,
2665 	    OID_AUTO, "do_global_reset",
2666 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2667 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2668 
2669 	SYSCTL_ADD_PROC(ctx, debug_list,
2670 	    OID_AUTO, "queue_interrupt_table",
2671 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2672 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2673 
2674 	if (pf->has_i2c) {
2675 		SYSCTL_ADD_PROC(ctx, debug_list,
2676 		    OID_AUTO, "read_i2c_byte",
2677 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2678 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2679 
2680 		SYSCTL_ADD_PROC(ctx, debug_list,
2681 		    OID_AUTO, "write_i2c_byte",
2682 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2683 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2684 
2685 		SYSCTL_ADD_PROC(ctx, debug_list,
2686 		    OID_AUTO, "read_i2c_diag_data",
2687 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2688 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2689 	}
2690 }
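
/*
 * Note: the debug tree is hidden from "sysctl -a" output by CTLFLAG_SKIP,
 * but its nodes remain reachable by explicit name, e.g. (hypothetical
 * device unit 0):
 *
 *	# sysctl dev.ixl.0.debug.link_status
 */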
2691 
2692 /*
2693  * Primarily for finding out, at runtime, how many queues can be
2694  * assigned to VFs.
2695  */
2696 static int
2697 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2698 {
2699 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2700 	int queues;
2701 
2702 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2703 
2704 	return sysctl_handle_int(oidp, NULL, queues, req);
2705 }
2706 
2707 static const char *
2708 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2709 {
2710 	const char * link_speed_str[] = {
2711 		"Unknown",
2712 		"100 Mbps",
2713 		"1 Gbps",
2714 		"10 Gbps",
2715 		"40 Gbps",
2716 		"20 Gbps",
2717 		"25 Gbps",
2718 		"2.5 Gbps",
2719 		"5 Gbps"
2720 	};
2721 	int index;
2722 
2723 	switch (link_speed) {
2724 	case I40E_LINK_SPEED_100MB:
2725 		index = 1;
2726 		break;
2727 	case I40E_LINK_SPEED_1GB:
2728 		index = 2;
2729 		break;
2730 	case I40E_LINK_SPEED_10GB:
2731 		index = 3;
2732 		break;
2733 	case I40E_LINK_SPEED_40GB:
2734 		index = 4;
2735 		break;
2736 	case I40E_LINK_SPEED_20GB:
2737 		index = 5;
2738 		break;
2739 	case I40E_LINK_SPEED_25GB:
2740 		index = 6;
2741 		break;
2742 	case I40E_LINK_SPEED_2_5GB:
2743 		index = 7;
2744 		break;
2745 	case I40E_LINK_SPEED_5GB:
2746 		index = 8;
2747 		break;
2748 	case I40E_LINK_SPEED_UNKNOWN:
2749 	default:
2750 		index = 0;
2751 		break;
2752 	}
2753 
2754 	return (link_speed_str[index]);
2755 }
2756 
2757 int
2758 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2759 {
2760 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2761 	struct i40e_hw *hw = &pf->hw;
2762 	int error = 0;
2763 
2764 	ixl_update_link_status(pf);
2765 
2766 	error = sysctl_handle_string(oidp,
2767 	    __DECONST(void *,
2768 		ixl_link_speed_string(hw->phy.link_info.link_speed)),
2769 	    8, req);
2770 
2771 	return (error);
2772 }
2773 
2774 /*
2775  * Converts an 8-bit speed bitmap between sysctl flags and
2776  * Admin Queue flags.
2777  */
2778 static u8
2779 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2780 {
2781 #define SPEED_MAP_SIZE 8
2782 	static u16 speedmap[SPEED_MAP_SIZE] = {
2783 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2784 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2785 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2786 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2787 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2788 		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2789 		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2790 		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2791 	};
2792 	u8 retval = 0;
2793 
2794 	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2795 		if (to_aq)
2796 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2797 		else
2798 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2799 	}
2800 
2801 	return (retval);
2802 }
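
/*
 * Worked example: each speedmap entry packs the AQ flag in its low byte
 * and the matching sysctl flag in its high byte.  With to_aq == true and
 * speeds == 0x4 (the 10G sysctl flag), only the I40E_LINK_SPEED_10GB entry
 * matches (0x4 == speedmap[2] >> 8), so I40E_LINK_SPEED_10GB is returned;
 * passing that value back with to_aq == false recovers 0x4.
 */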
2803 
2804 int
2805 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2806 {
2807 	struct i40e_hw *hw = &pf->hw;
2808 	device_t dev = pf->dev;
2809 	struct i40e_aq_get_phy_abilities_resp abilities;
2810 	struct i40e_aq_set_phy_config config;
2811 	enum i40e_status_code aq_error = 0;
2812 
2813 	/* Get current capability information */
2814 	aq_error = i40e_aq_get_phy_capabilities(hw,
2815 	    FALSE, FALSE, &abilities, NULL);
2816 	if (aq_error) {
2817 		device_printf(dev,
2818 		    "%s: Error getting phy capabilities %d,"
2819 		    " aq error: %d\n", __func__, aq_error,
2820 		    hw->aq.asq_last_status);
2821 		return (EIO);
2822 	}
2823 
2824 	/* Prepare new config */
2825 	bzero(&config, sizeof(config));
2826 	if (from_aq)
2827 		config.link_speed = speeds;
2828 	else
2829 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2830 	config.phy_type = abilities.phy_type;
2831 	config.phy_type_ext = abilities.phy_type_ext;
2832 	config.abilities = abilities.abilities
2833 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2834 	config.eee_capability = abilities.eee_capability;
2835 	config.eeer = abilities.eeer_val;
2836 	config.low_power_ctrl = abilities.d3_lpan;
2837 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2838 	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2839 
2840 	/* Do aq command & restart link */
2841 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2842 	if (aq_error) {
2843 		device_printf(dev,
2844 		    "%s: Error setting new phy config %d,"
2845 		    " aq error: %d\n", __func__, aq_error,
2846 		    hw->aq.asq_last_status);
2847 		return (EIO);
2848 	}
2849 
2850 	return (0);
2851 }
2852 
2853 /*
2854 ** Supported link speeds
2855 **	Flags:
2856 **	 0x1 - 100 Mb
2857 **	 0x2 - 1G
2858 **	 0x4 - 10G
2859 **	 0x8 - 20G
2860 **	0x10 - 25G
2861 **	0x20 - 40G
2862 **	0x40 - 2.5G
2863 **	0x80 - 5G
2864 */
2865 static int
2866 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2867 {
2868 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2869 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2870 
2871 	return sysctl_handle_int(oidp, NULL, supported, req);
2872 }
2873 
2874 /*
2875 ** Control link advertise speed:
2876 **	Flags:
2877 **	 0x1 - advertise 100 Mb
2878 **	 0x2 - advertise 1G
2879 **	 0x4 - advertise 10G
2880 **	 0x8 - advertise 20G
2881 **	0x10 - advertise 25G
2882 **	0x20 - advertise 40G
2883 **	0x40 - advertise 2.5G
2884 **	0x80 - advertise 5G
2885 **
2886 **	Set to 0 to disable link
2887 */
2888 int
2889 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2890 {
2891 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2892 	device_t dev = pf->dev;
2893 	u8 converted_speeds;
2894 	int requested_ls = 0;
2895 	int error = 0;
2896 
2897 	/* Read in new mode */
2898 	requested_ls = pf->advertised_speed;
2899 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2900 	if ((error) || (req->newptr == NULL))
2901 		return (error);
2902 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2903 		device_printf(dev, "Interface is currently in FW recovery mode. "
2904 				"Setting advertise speed not supported\n");
2905 		return (EINVAL);
2906 	}
2907 
2908 	/* Error out if bits outside of possible flag range are set */
2909 	if ((requested_ls & ~((u8)0xFF)) != 0) {
2910 		device_printf(dev, "Input advertised speed out of range; "
2911 		    "valid flags are: 0x%02x\n",
2912 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2913 		return (EINVAL);
2914 	}
2915 
2916 	/* Check if adapter supports input value */
2917 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2918 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2919 		device_printf(dev, "Invalid advertised speed; "
2920 		    "valid flags are: 0x%02x\n",
2921 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2922 		return (EINVAL);
2923 	}
2924 
2925 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
2926 	if (error)
2927 		return (error);
2928 
2929 	pf->advertised_speed = requested_ls;
2930 	ixl_update_link_status(pf);
2931 	return (0);
2932 }
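
/*
 * Usage sketch combining the flags documented above (hypothetical device
 * unit 0): advertise only 10G (0x4) and 40G (0x20), assuming the adapter
 * reports both bits in supported_speeds:
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x24
 */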
2933 
2934 /*
2935  * Input: bitmap of enum i40e_aq_link_speed
2936  */
2937 u64
2938 ixl_max_aq_speed_to_value(u8 link_speeds)
2939 {
2940 	if (link_speeds & I40E_LINK_SPEED_40GB)
2941 		return IF_Gbps(40);
2942 	if (link_speeds & I40E_LINK_SPEED_25GB)
2943 		return IF_Gbps(25);
2944 	if (link_speeds & I40E_LINK_SPEED_20GB)
2945 		return IF_Gbps(20);
2946 	if (link_speeds & I40E_LINK_SPEED_10GB)
2947 		return IF_Gbps(10);
2948 	if (link_speeds & I40E_LINK_SPEED_5GB)
2949 		return IF_Gbps(5);
2950 	if (link_speeds & I40E_LINK_SPEED_2_5GB)
2951 		return IF_Mbps(2500);
2952 	if (link_speeds & I40E_LINK_SPEED_1GB)
2953 		return IF_Gbps(1);
2954 	if (link_speeds & I40E_LINK_SPEED_100MB)
2955 		return IF_Mbps(100);
2956 	else
2957 		/* Minimum supported link speed */
2958 		return IF_Mbps(100);
2959 }
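
/*
 * Example: link_speeds == (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB)
 * returns IF_Gbps(10), since the checks above prefer the fastest set bit.
 */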
2960 
2961 /*
2962 ** Get the width and transaction speed of
2963 ** the bus this adapter is plugged into.
2964 */
2965 void
2966 ixl_get_bus_info(struct ixl_pf *pf)
2967 {
2968 	struct i40e_hw *hw = &pf->hw;
2969 	device_t dev = pf->dev;
2970 	u16 link;
2971 	u32 offset, num_ports;
2972 	u64 max_speed;
2973 
2974 	/* Some devices don't use PCIE */
2975 	if (hw->mac.type == I40E_MAC_X722)
2976 		return;
2977 
2978 	/* Read PCI Express Capabilities Link Status Register */
2979 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
2980 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2981 
2982 	/* Fill out hw struct with PCIE info */
2983 	i40e_set_pci_config_data(hw, link);
2984 
2985 	/* Use info to print out bandwidth messages */
2986 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2987 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
2988 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
2989 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
2990 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2991 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2992 	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2993 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2994 	    ("Unknown"));
2995 
2996 	/*
2997 	 * If adapter is in slot with maximum supported speed,
2998 	 * no warning message needs to be printed out.
2999 	 */
3000 	if (hw->bus.speed >= i40e_bus_speed_8000
3001 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3002 		return;
3003 
3004 	num_ports = bitcount32(hw->func_caps.valid_functions);
3005 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3006 
3007 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3008 		device_printf(dev, "PCI-Express bandwidth available"
3009 		    " for this device may be insufficient for"
3010 		    " optimal performance.\n");
3011 		device_printf(dev, "Please move the device to a different"
3012 		    " PCI-e link with more lanes and/or higher"
3013 		    " transfer rate.\n");
3014 	}
3015 }
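
/*
 * Worked example of the bandwidth check above, assuming the i40e bus
 * enums encode MT/s (e.g. 8000 for 8.0 GT/s) and the lane count directly:
 * a four-port 40G adapter needs 4 * 40000 Mb/s = 160000, while a PCIe 3.0
 * x8 slot provides 8000 * 8 = 64000, so both warnings are printed.
 */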
3016 
3017 static int
3018 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3019 {
3020 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3021 	struct i40e_hw	*hw = &pf->hw;
3022 	struct sbuf	*sbuf;
3023 
3024 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3025 	ixl_nvm_version_str(hw, sbuf);
3026 	sbuf_finish(sbuf);
3027 	sbuf_delete(sbuf);
3028 
3029 	return (0);
3030 }
3031 
3032 void
3033 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3034 {
3035 	u8 nvma_ptr = nvma->config & 0xFF;
3036 	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3037 	const char * cmd_str;
3038 
3039 	switch (nvma->command) {
3040 	case I40E_NVM_READ:
3041 		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3042 		    nvma->offset == 0 && nvma->data_size == 1) {
3043 			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3044 			return;
3045 		}
3046 		cmd_str = "READ ";
3047 		break;
3048 	case I40E_NVM_WRITE:
3049 		cmd_str = "WRITE";
3050 		break;
3051 	default:
3052 		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3053 		return;
3054 	}
3055 	device_printf(dev,
3056 	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3057 	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3058 }
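
/*
 * Example decode of the config field above: config == 0xF0F yields
 * nvma_ptr == 0x0F and nvma_flags == 0xF, so an I40E_NVM_READ with
 * offset == 0 and data_size == 1 is reported as the Get Driver Status
 * command by the special case above.
 */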
3059 
3060 int
3061 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3062 {
3063 	struct i40e_hw *hw = &pf->hw;
3064 	struct i40e_nvm_access *nvma;
3065 	device_t dev = pf->dev;
3066 	enum i40e_status_code status = 0;
3067 	size_t nvma_size, ifd_len, exp_len;
3068 	int err, perrno;
3069 
3070 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3071 
3072 	/* Sanity checks */
3073 	nvma_size = sizeof(struct i40e_nvm_access);
3074 	ifd_len = ifd->ifd_len;
3075 
3076 	if (ifd_len < nvma_size ||
3077 	    ifd->ifd_data == NULL) {
3078 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3079 		    __func__);
3080 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3081 		    __func__, ifd_len, nvma_size);
3082 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3083 		    ifd->ifd_data);
3084 		return (EINVAL);
3085 	}
3086 
3087 	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3088 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3089 	if (err) {
3090 		device_printf(dev, "%s: Cannot get request from user space\n",
3091 		    __func__);
3092 		free(nvma, M_IXL);
3093 		return (err);
3094 	}
3095 
3096 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3097 		ixl_print_nvm_cmd(dev, nvma);
3098 
3099 	if (IXL_PF_IS_RESETTING(pf)) {
3100 		int count = 0;
3101 		while (count++ < 100) {
3102 			i40e_msec_delay(100);
3103 			if (!(IXL_PF_IS_RESETTING(pf)))
3104 				break;
3105 		}
3106 	}
3107 
3108 	if (IXL_PF_IS_RESETTING(pf)) {
3109 		device_printf(dev,
3110 		    "%s: timeout waiting for EMP reset to finish\n",
3111 		    __func__);
3112 		free(nvma, M_IXL);
3113 		return (-EBUSY);
3114 	}
3115 
3116 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3117 		device_printf(dev,
3118 		    "%s: invalid request, data size not in supported range\n",
3119 		    __func__);
3120 		free(nvma, M_IXL);
3121 		return (EINVAL);
3122 	}
3123 
3124 	/*
3125 	 * Older versions of the NVM update tool don't set ifd_len to the size
3126 	 * of the entire buffer passed to the ioctl. Check the data_size field
3127 	 * in the contained i40e_nvm_access struct and ensure everything is
3128 	 * copied in from userspace.
3129 	 */
3130 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3131 
3132 	if (ifd_len < exp_len) {
3133 		ifd_len = exp_len;
3134 		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3135 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3136 		if (err) {
3137 			device_printf(dev, "%s: Cannot get request from user space\n",
3138 					__func__);
3139 			free(nvma, M_IXL);
3140 			return (err);
3141 		}
3142 	}
3143 
3144 	/* TODO: Might need a different lock here */
3145 	/* IXL_PF_LOCK(pf); */
3146 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3147 	/* IXL_PF_UNLOCK(pf); */
3148 
3149 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3150 	free(nvma, M_IXL);
3151 	if (err) {
3152 		device_printf(dev, "%s: Cannot return data to user space\n",
3153 				__func__);
3154 		return (err);
3155 	}
3156 
3157 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3158 	/* Let nvmupdate report errors; show them only when debug is enabled */
3159 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3160 		    i40e_stat_str(hw, status), perrno);
3161 
3162 	/*
3163 	 * -EPERM is actually ERESTART, which the kernel interprets as needing
3164 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3165 	 */
3166 	if (perrno == -EPERM)
3167 		return (-EACCES);
3168 	else
3169 		return (perrno);
3170 }
3171 
3172 int
3173 ixl_find_i2c_interface(struct ixl_pf *pf)
3174 {
3175 	struct i40e_hw *hw = &pf->hw;
3176 	bool i2c_en, port_matched;
3177 	u32 reg;
3178 
3179 	for (int i = 0; i < 4; i++) {
3180 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3181 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3182 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3183 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3184 		    & BIT(hw->port);
3185 		if (i2c_en && port_matched)
3186 			return (i);
3187 	}
3188 
3189 	return (-1);
3190 }
3191 
3192 void
3193 ixl_set_link(struct ixl_pf *pf, bool enable)
3194 {
3195 	struct i40e_hw *hw = &pf->hw;
3196 	device_t dev = pf->dev;
3197 	struct i40e_aq_get_phy_abilities_resp abilities;
3198 	struct i40e_aq_set_phy_config config;
3199 	enum i40e_status_code aq_error = 0;
3200 	u32 phy_type, phy_type_ext;
3201 
3202 	/* Get initial capability information */
3203 	aq_error = i40e_aq_get_phy_capabilities(hw,
3204 	    FALSE, TRUE, &abilities, NULL);
3205 	if (aq_error) {
3206 		device_printf(dev,
3207 		    "%s: Error getting phy capabilities %d,"
3208 		    " aq error: %d\n", __func__, aq_error,
3209 		    hw->aq.asq_last_status);
3210 		return;
3211 	}
3212 
3213 	phy_type = abilities.phy_type;
3214 	phy_type_ext = abilities.phy_type_ext;
3215 
3216 	/* Get current capability information */
3217 	aq_error = i40e_aq_get_phy_capabilities(hw,
3218 	    FALSE, FALSE, &abilities, NULL);
3219 	if (aq_error) {
3220 		device_printf(dev,
3221 		    "%s: Error getting phy capabilities %d,"
3222 		    " aq error: %d\n", __func__, aq_error,
3223 		    hw->aq.asq_last_status);
3224 		return;
3225 	}
3226 
3227 	/* Prepare new config */
3228 	memset(&config, 0, sizeof(config));
3229 	config.link_speed = abilities.link_speed;
3230 	config.abilities = abilities.abilities;
3231 	config.eee_capability = abilities.eee_capability;
3232 	config.eeer = abilities.eeer_val;
3233 	config.low_power_ctrl = abilities.d3_lpan;
3234 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3235 	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
3236 	config.phy_type = 0;
3237 	config.phy_type_ext = 0;
3238 
3239 	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3240 			I40E_AQ_PHY_FLAG_PAUSE_RX);
3241 
3242 	switch (pf->fc) {
3243 	case I40E_FC_FULL:
3244 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3245 			I40E_AQ_PHY_FLAG_PAUSE_RX;
3246 		break;
3247 	case I40E_FC_RX_PAUSE:
3248 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3249 		break;
3250 	case I40E_FC_TX_PAUSE:
3251 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3252 		break;
3253 	default:
3254 		break;
3255 	}
3256 
3257 	if (enable) {
3258 		config.phy_type = phy_type;
3259 		config.phy_type_ext = phy_type_ext;
3261 	}
3262 
3263 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3264 	if (aq_error) {
3265 		device_printf(dev,
3266 		    "%s: Error setting new phy config %d,"
3267 		    " aq error: %d\n", __func__, aq_error,
3268 		    hw->aq.asq_last_status);
3269 		return;
3270 	}
3271 
3272 	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3273 	if (aq_error) {
3274 		device_printf(dev,
3275 		    "%s: Error set link config %d,"
3276 		    " aq error: %d\n", __func__, aq_error,
3277 		    hw->aq.asq_last_status);
3278 		return;
3279 	}
3280 }
3281 
3282 static char *
3283 ixl_phy_type_string(u32 bit_pos, bool ext)
3284 {
3285 	static char * phy_types_str[32] = {
3286 		"SGMII",
3287 		"1000BASE-KX",
3288 		"10GBASE-KX4",
3289 		"10GBASE-KR",
3290 		"40GBASE-KR4",
3291 		"XAUI",
3292 		"XFI",
3293 		"SFI",
3294 		"XLAUI",
3295 		"XLPPI",
3296 		"40GBASE-CR4",
3297 		"10GBASE-CR1",
3298 		"SFP+ Active DA",
3299 		"QSFP+ Active DA",
3300 		"Reserved (14)",
3301 		"Reserved (15)",
3302 		"Reserved (16)",
3303 		"100BASE-TX",
3304 		"1000BASE-T",
3305 		"10GBASE-T",
3306 		"10GBASE-SR",
3307 		"10GBASE-LR",
3308 		"10GBASE-SFP+Cu",
3309 		"10GBASE-CR1",
3310 		"40GBASE-CR4",
3311 		"40GBASE-SR4",
3312 		"40GBASE-LR4",
3313 		"1000BASE-SX",
3314 		"1000BASE-LX",
3315 		"1000BASE-T Optical",
3316 		"20GBASE-KR2",
3317 		"Reserved (31)"
3318 	};
3319 	static char * ext_phy_types_str[8] = {
3320 		"25GBASE-KR",
3321 		"25GBASE-CR",
3322 		"25GBASE-SR",
3323 		"25GBASE-LR",
3324 		"25GBASE-AOC",
3325 		"25GBASE-ACC",
3326 		"2.5GBASE-T",
3327 		"5GBASE-T"
3328 	};
3329 
3330 	if (ext && bit_pos > 7) return "Invalid_Ext";
3331 	if (bit_pos > 31) return "Invalid";
3332 
3333 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3334 }
3335 
3336 /* TODO: ERJ: I don't think this is necessary anymore. */
3337 int
3338 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3339 {
3340 	device_t dev = pf->dev;
3341 	struct i40e_hw *hw = &pf->hw;
3342 	struct i40e_aq_desc desc;
3343 	enum i40e_status_code status;
3344 
3345 	struct i40e_aqc_get_link_status *aq_link_status =
3346 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3347 
3348 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3349 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3350 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3351 	if (status) {
3352 		device_printf(dev,
3353 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3354 		    __func__, i40e_stat_str(hw, status),
3355 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3356 		return (EIO);
3357 	}
3358 
3359 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3360 	return (0);
3361 }
3362 
3363 static char *
3364 ixl_phy_type_string_ls(u8 val)
3365 {
3366 	if (val >= 0x1F)
3367 		return ixl_phy_type_string(val - 0x1F, true);
3368 	else
3369 		return ixl_phy_type_string(val, false);
3370 }
3371 
3372 static int
3373 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3374 {
3375 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3376 	device_t dev = pf->dev;
3377 	struct sbuf *buf;
3378 	int error = 0;
3379 
3380 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3381 	if (!buf) {
3382 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3383 		return (ENOMEM);
3384 	}
3385 
3386 	struct i40e_aqc_get_link_status link_status;
3387 	error = ixl_aq_get_link_status(pf, &link_status);
3388 	if (error) {
3389 		sbuf_delete(buf);
3390 		return (error);
3391 	}
3392 
3393 	sbuf_printf(buf, "\n"
3394 	    "PHY Type : 0x%02x<%s>\n"
3395 	    "Speed    : 0x%02x\n"
3396 	    "Link info: 0x%02x\n"
3397 	    "AN info  : 0x%02x\n"
3398 	    "Ext info : 0x%02x\n"
3399 	    "Loopback : 0x%02x\n"
3400 	    "Max Frame: %d\n"
3401 	    "Config   : 0x%02x\n"
3402 	    "Power    : 0x%02x",
3403 	    link_status.phy_type,
3404 	    ixl_phy_type_string_ls(link_status.phy_type),
3405 	    link_status.link_speed,
3406 	    link_status.link_info,
3407 	    link_status.an_info,
3408 	    link_status.ext_info,
3409 	    link_status.loopback,
3410 	    link_status.max_frame_size,
3411 	    link_status.config,
3412 	    link_status.power_desc);
3413 
3414 	error = sbuf_finish(buf);
3415 	if (error)
3416 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3417 
3418 	sbuf_delete(buf);
3419 	return (error);
3420 }
3421 
3422 static int
3423 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3424 {
3425 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3426 	struct i40e_hw *hw = &pf->hw;
3427 	device_t dev = pf->dev;
3428 	enum i40e_status_code status;
3429 	struct i40e_aq_get_phy_abilities_resp abilities;
3430 	struct sbuf *buf;
3431 	int error = 0;
3432 
3433 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3434 	if (!buf) {
3435 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3436 		return (ENOMEM);
3437 	}
3438 
3439 	status = i40e_aq_get_phy_capabilities(hw,
3440 	    FALSE, arg2 != 0, &abilities, NULL);
3441 	if (status) {
3442 		device_printf(dev,
3443 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3444 		    __func__, i40e_stat_str(hw, status),
3445 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3446 		sbuf_delete(buf);
3447 		return (EIO);
3448 	}
3449 
3450 	sbuf_printf(buf, "\n"
3451 	    "PHY Type : %08x",
3452 	    abilities.phy_type);
3453 
3454 	if (abilities.phy_type != 0) {
3455 		sbuf_printf(buf, "<");
3456 		for (int i = 0; i < 32; i++)
3457 			if ((1 << i) & abilities.phy_type)
3458 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3459 		sbuf_printf(buf, ">");
3460 	}
3461 
3462 	sbuf_printf(buf, "\nPHY Ext  : %02x",
3463 	    abilities.phy_type_ext);
3464 
3465 	if (abilities.phy_type_ext != 0) {
3466 		sbuf_printf(buf, "<");
3467 		for (int i = 0; i < 4; i++)
3468 			if ((1 << i) & abilities.phy_type_ext)
3469 				sbuf_printf(buf, "%s,",
3470 				    ixl_phy_type_string(i, true));
3471 		sbuf_printf(buf, ">");
3472 	}
3473 
3474 	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3475 	if (abilities.link_speed != 0) {
3476 		u8 link_speed;
3477 		sbuf_printf(buf, " <");
3478 		for (int i = 0; i < 8; i++) {
3479 			link_speed = (1 << i) & abilities.link_speed;
3480 			if (link_speed)
3481 				sbuf_printf(buf, "%s, ",
3482 				    ixl_link_speed_string(link_speed));
3483 		}
3484 		sbuf_printf(buf, ">");
3485 	}
3486 
3487 	sbuf_printf(buf, "\n"
3488 	    "Abilities: %02x\n"
3489 	    "EEE cap  : %04x\n"
3490 	    "EEER reg : %08x\n"
3491 	    "D3 Lpan  : %02x\n"
3492 	    "ID       : %02x %02x %02x %02x\n"
3493 	    "ModType  : %02x %02x %02x\n"
3494 	    "ModType E: %01x\n"
3495 	    "FEC Cfg  : %02x\n"
3496 	    "Ext CC   : %02x",
3497 	    abilities.abilities, abilities.eee_capability,
3498 	    abilities.eeer_val, abilities.d3_lpan,
3499 	    abilities.phy_id[0], abilities.phy_id[1],
3500 	    abilities.phy_id[2], abilities.phy_id[3],
3501 	    abilities.module_type[0], abilities.module_type[1],
3502 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3503 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3504 	    abilities.ext_comp_code);
3505 
3506 	error = sbuf_finish(buf);
3507 	if (error)
3508 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3509 
3510 	sbuf_delete(buf);
3511 	return (error);
3512 }
3513 
3514 static int
3515 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3516 {
3517 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3518 	struct ixl_vsi *vsi = &pf->vsi;
3519 	struct ixl_mac_filter *f;
3520 	device_t dev = pf->dev;
3521 	int error = 0, ftl_len = 0, ftl_counter = 0;
3522 
3523 	struct sbuf *buf;
3524 
3525 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3526 	if (!buf) {
3527 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3528 		return (ENOMEM);
3529 	}
3530 
3531 	sbuf_printf(buf, "\n");
3532 
3533 	/* Print MAC filters */
3534 	sbuf_printf(buf, "PF Filters:\n");
3535 	LIST_FOREACH(f, &vsi->ftl, ftle)
3536 		ftl_len++;
3537 
3538 	if (ftl_len < 1)
3539 		sbuf_printf(buf, "(none)\n");
3540 	else {
3541 		LIST_FOREACH(f, &vsi->ftl, ftle) {
3542 			sbuf_printf(buf,
3543 			    MAC_FORMAT ", vlan %4d, flags %#06x",
3544 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3545 			/* don't print '\n' for last entry */
3546 			if (++ftl_counter != ftl_len)
3547 				sbuf_printf(buf, "\n");
3548 		}
3549 	}
3550 
3551 #ifdef PCI_IOV
3552 	/* TODO: Give each VF its own filter list sysctl */
3553 	struct ixl_vf *vf;
3554 	if (pf->num_vfs > 0) {
3555 		sbuf_printf(buf, "\n\n");
3556 		for (int i = 0; i < pf->num_vfs; i++) {
3557 			vf = &pf->vfs[i];
3558 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
3559 				continue;
3560 
3561 			vsi = &vf->vsi;
3562 			ftl_len = ftl_counter = 0;
3563 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3564 			LIST_FOREACH(f, &vsi->ftl, ftle)
3565 				ftl_len++;
3566 
3567 			if (ftl_len < 1)
3568 				sbuf_printf(buf, "(none)\n");
3569 			else {
3570 				LIST_FOREACH(f, &vsi->ftl, ftle) {
3571 					sbuf_printf(buf,
3572 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
3573 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3574 				}
3575 			}
3576 		}
3577 	}
3578 #endif
3579 
3580 	error = sbuf_finish(buf);
3581 	if (error)
3582 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3583 	sbuf_delete(buf);
3584 
3585 	return (error);
3586 }
3587 
3588 #define IXL_SW_RES_SIZE 0x14
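/*
 * qsort(3) comparator: orders switch resource allocation entries by
 * ascending resource_type so the sysctl output groups like resources.
 */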
3589 int
3590 ixl_res_alloc_cmp(const void *a, const void *b)
3591 {
3592 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3593 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3594 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3595 
3596 	return ((int)one->resource_type - (int)two->resource_type);
3597 }
3598 
3599 /*
3600  * Longest string length: 25
3601  */
3602 const char *
3603 ixl_switch_res_type_string(u8 type)
3604 {
3605 	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3606 		"VEB",
3607 		"VSI",
3608 		"Perfect Match MAC address",
3609 		"S-tag",
3610 		"(Reserved)",
3611 		"Multicast hash entry",
3612 		"Unicast hash entry",
3613 		"VLAN",
3614 		"VSI List entry",
3615 		"(Reserved)",
3616 		"VLAN Statistic Pool",
3617 		"Mirror Rule",
3618 		"Queue Set",
3619 		"Inner VLAN Forward filter",
3620 		"(Reserved)",
3621 		"Inner MAC",
3622 		"IP",
3623 		"GRE/VN1 Key",
3624 		"VN2 Key",
3625 		"Tunneling Port"
3626 	};
3627 
3628 	if (type < IXL_SW_RES_SIZE)
3629 		return ixl_switch_res_type_strings[type];
3630 	else
3631 		return "(Reserved)";
3632 }
3633 
3634 static int
3635 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3636 {
3637 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3638 	struct i40e_hw *hw = &pf->hw;
3639 	device_t dev = pf->dev;
3640 	struct sbuf *buf;
3641 	enum i40e_status_code status;
3642 	int error = 0;
3643 
3644 	u8 num_entries;
3645 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3646 
3647 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3648 	if (!buf) {
3649 		device_printf(dev, "Could not allocate sbuf for output.\n");
3650 		return (ENOMEM);
3651 	}
3652 
3653 	bzero(resp, sizeof(resp));
3654 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3655 				resp,
3656 				IXL_SW_RES_SIZE,
3657 				NULL);
3658 	if (status) {
3659 		device_printf(dev,
3660 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3661 		    __func__, i40e_stat_str(hw, status),
3662 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3663 		sbuf_delete(buf);
3664 		return (EIO);
3665 	}
3666 
3667 	/* Sort entries by type for display */
3668 	qsort(resp, num_entries,
3669 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3670 	    &ixl_res_alloc_cmp);
3671 
3672 	sbuf_cat(buf, "\n");
3673 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3674 	sbuf_printf(buf,
3675 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3676 	    "                          | (this)     | (all) | (this) | (all)       \n");
3677 	for (int i = 0; i < num_entries; i++) {
3678 		sbuf_printf(buf,
3679 		    "%25s | %10d   %5d   %6d   %12d",
3680 		    ixl_switch_res_type_string(resp[i].resource_type),
3681 		    resp[i].guaranteed,
3682 		    resp[i].total,
3683 		    resp[i].used,
3684 		    resp[i].total_unalloced);
3685 		if (i < num_entries - 1)
3686 			sbuf_cat(buf, "\n");
3687 	}
3688 
3689 	error = sbuf_finish(buf);
3690 	if (error)
3691 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3692 
3693 	sbuf_delete(buf);
3694 	return (error);
3695 }
3696 
3697 enum ixl_sw_seid_offset {
3698 	IXL_SW_SEID_EMP = 1,
3699 	IXL_SW_SEID_MAC_START = 2,
3700 	IXL_SW_SEID_MAC_END = 5,
3701 	IXL_SW_SEID_PF_START = 16,
3702 	IXL_SW_SEID_PF_END = 31,
3703 	IXL_SW_SEID_VF_START = 32,
3704 	IXL_SW_SEID_VF_END = 159,
3705 };
3706 
3707 /*
3708  * Caller must init and delete sbuf; this function will clear and
3709  * finish it for caller.
3710  *
3711  * Note: The SEID argument only applies for elements defined by FW at
3712  * power-on; these include the EMP, Ports, PFs and VFs.
3713  */
3714 static char *
3715 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3716 {
3717 	sbuf_clear(s);
3718 
3719 	/* If SEID is in certain ranges, then we can infer the
3720 	 * mapping of SEID to switch element.
3721 	 */
3722 	if (seid == IXL_SW_SEID_EMP) {
3723 		sbuf_cat(s, "EMP");
3724 		goto out;
3725 	} else if (seid >= IXL_SW_SEID_MAC_START &&
3726 	    seid <= IXL_SW_SEID_MAC_END) {
3727 		sbuf_printf(s, "MAC  %2d",
3728 		    seid - IXL_SW_SEID_MAC_START);
3729 		goto out;
3730 	} else if (seid >= IXL_SW_SEID_PF_START &&
3731 	    seid <= IXL_SW_SEID_PF_END) {
3732 		sbuf_printf(s, "PF  %3d",
3733 		    seid - IXL_SW_SEID_PF_START);
3734 		goto out;
3735 	} else if (seid >= IXL_SW_SEID_VF_START &&
3736 	    seid <= IXL_SW_SEID_VF_END) {
3737 		sbuf_printf(s, "VF  %3d",
3738 		    seid - IXL_SW_SEID_VF_START);
3739 		goto out;
3740 	}
3741 
3742 	switch (element_type) {
3743 	case I40E_AQ_SW_ELEM_TYPE_BMC:
3744 		sbuf_cat(s, "BMC");
3745 		break;
3746 	case I40E_AQ_SW_ELEM_TYPE_PV:
3747 		sbuf_cat(s, "PV");
3748 		break;
3749 	case I40E_AQ_SW_ELEM_TYPE_VEB:
3750 		sbuf_cat(s, "VEB");
3751 		break;
3752 	case I40E_AQ_SW_ELEM_TYPE_PA:
3753 		sbuf_cat(s, "PA");
3754 		break;
3755 	case I40E_AQ_SW_ELEM_TYPE_VSI:
3756 		sbuf_printf(s, "VSI");
3757 		break;
3758 	default:
3759 		sbuf_cat(s, "?");
3760 		break;
3761 	}
3762 
3763 out:
3764 	sbuf_finish(s);
3765 	return sbuf_data(s);
3766 }
3767 
3768 static int
3769 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3770 {
3771 	const struct i40e_aqc_switch_config_element_resp *one, *two;
3772 	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3773 	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3774 
3775 	return ((int)one->seid - (int)two->seid);
3776 }
3777 
3778 static int
3779 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3780 {
3781 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3782 	struct i40e_hw *hw = &pf->hw;
3783 	device_t dev = pf->dev;
3784 	struct sbuf *buf;
3785 	struct sbuf *nmbuf;
3786 	enum i40e_status_code status;
3787 	int error = 0;
3788 	u16 next = 0;
3789 	u8 aq_buf[I40E_AQ_LARGE_BUF];
3790 
3791 	struct i40e_aqc_switch_config_element_resp *elem;
3792 	struct i40e_aqc_get_switch_config_resp *sw_config;
3793 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3794 
3795 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3796 	if (!buf) {
3797 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3798 		return (ENOMEM);
3799 	}
3800 
3801 	status = i40e_aq_get_switch_config(hw, sw_config,
3802 	    sizeof(aq_buf), &next, NULL);
3803 	if (status) {
3804 		device_printf(dev,
3805 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
3806 		    __func__, i40e_stat_str(hw, status),
3807 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3808 		sbuf_delete(buf);
3809 		return (EIO);
3810 	}
3811 	if (next)
3812 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3813 		    __func__, next);
3814 
3815 	nmbuf = sbuf_new_auto();
3816 	if (!nmbuf) {
3817 		device_printf(dev, "Could not allocate sbuf for name output.\n");
3818 		sbuf_delete(buf);
3819 		return (ENOMEM);
3820 	}
3821 
3822 	/* Sort entries by SEID for display */
3823 	qsort(sw_config->element, sw_config->header.num_reported,
3824 	    sizeof(struct i40e_aqc_switch_config_element_resp),
3825 	    &ixl_sw_cfg_elem_seid_cmp);
3826 
3827 	sbuf_cat(buf, "\n");
3828 	/* Assuming <= 255 elements in switch */
3829 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3830 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3831 	/* Exclude:
3832 	 * Revision -- all elements are revision 1 for now
3833 	 */
3834 	sbuf_printf(buf,
3835 	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3836 	    "                |                 |                 | (uplink)\n");
3837 	for (int i = 0; i < sw_config->header.num_reported; i++) {
3838 		elem = &sw_config->element[i];
3839 
3840 		// "%4d (%8s) | %8s   %8s   %#8x",
3841 		sbuf_printf(buf, "%4d", elem->seid);
3842 		sbuf_cat(buf, " ");
3843 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3844 		    elem->element_type, elem->seid));
3845 		sbuf_cat(buf, " | ");
3846 		sbuf_printf(buf, "%4d", elem->uplink_seid);
3847 		sbuf_cat(buf, " ");
3848 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3849 		    0, elem->uplink_seid));
3850 		sbuf_cat(buf, " | ");
3851 		sbuf_printf(buf, "%4d", elem->downlink_seid);
3852 		sbuf_cat(buf, " ");
3853 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3854 		    0, elem->downlink_seid));
3855 		sbuf_cat(buf, " | ");
3856 		sbuf_printf(buf, "%8d", elem->connection_type);
3857 		if (i < sw_config->header.num_reported - 1)
3858 			sbuf_cat(buf, "\n");
3859 	}
3860 	sbuf_delete(nmbuf);
3861 
3862 	error = sbuf_finish(buf);
3863 	if (error)
3864 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3865 
3866 	sbuf_delete(buf);
3867 
3868 	return (error);
3869 }
3870 
3871 static int
3872 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3873 {
3874 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3875 	struct i40e_hw *hw = &pf->hw;
3876 	device_t dev = pf->dev;
3877 	int requested_vlan = -1;
3878 	enum i40e_status_code status = 0;
3879 	int error = 0;
3880 
3881 	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3882 	if ((error) || (req->newptr == NULL))
3883 		return (error);
3884 
3885 	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3886 		device_printf(dev, "Flags disallow setting of vlans\n");
3887 		return (ENODEV);
3888 	}
3889 
	/* switch_tag is a 16-bit Ethertype; reject values that do not fit */
	if (requested_vlan < 0 || requested_vlan > 0xFFFF)
		return (EINVAL);

3890 	hw->switch_tag = requested_vlan;
3891 	device_printf(dev,
3892 	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3893 	    hw->switch_tag, hw->first_tag, hw->second_tag);
3894 	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3895 	if (status) {
3896 		device_printf(dev,
3897 		    "%s: aq_set_switch_config() error %s, aq error %s\n",
3898 		    __func__, i40e_stat_str(hw, status),
3899 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3900 		/* i40e status codes are not errnos */
		return (EIO);
3901 	}
3902 	return (0);
3903 }
3904 
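/*
 * Dump the RSS hash key. X722 parts expose the key through an AQ command;
 * other MACs read it 32 bits at a time from the I40E_PFQF_HKEY registers
 * and reassemble it into the same structure.
 */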
3905 static int
3906 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3907 {
3908 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3909 	struct i40e_hw *hw = &pf->hw;
3910 	device_t dev = pf->dev;
3911 	struct sbuf *buf;
3912 	int error = 0;
3913 	enum i40e_status_code status;
3914 	u32 reg;
3915 
3916 	struct i40e_aqc_get_set_rss_key_data key_data;
3917 
3918 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3919 	if (!buf) {
3920 		device_printf(dev, "Could not allocate sbuf for output.\n");
3921 		return (ENOMEM);
3922 	}
3923 
3924 	bzero(&key_data, sizeof(key_data));
3925 
3926 	sbuf_cat(buf, "\n");
3927 	if (hw->mac.type == I40E_MAC_X722) {
3928 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3929 		if (status)
3930 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3931 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3932 	} else {
3933 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3934 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3935 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3936 		}
3937 	}
3938 
3939 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3940 
3941 	error = sbuf_finish(buf);
3942 	if (error)
3943 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3944 	sbuf_delete(buf);
3945 
3946 	return (error);
3947 }
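/*
 * Hex-dump `length' bytes of `buf' into the sbuf, 16 bytes per line, each
 * line prefixed with its offset (biased by label_offset). When `text' is
 * set, an ASCII column is appended, e.g. (illustrative):
 *
 *	   0 | 48 65 6c 6c 6f 00 ...          Hello.
 */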
3948 
3949 static void
3950 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3951 {
3952 	int i, j, k, width;
3953 	char c;
3954 
3955 	if (length < 1 || buf == NULL) return;
3956 
3957 	int byte_stride = 16;
3958 	int lines = length / byte_stride;
3959 	int rem = length % byte_stride;
3960 	if (rem > 0)
3961 		lines++;
3962 
3963 	for (i = 0; i < lines; i++) {
3964 		width = (rem > 0 && i == lines - 1)
3965 		    ? rem : byte_stride;
3966 
3967 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3968 
3969 		for (j = 0; j < width; j++)
3970 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3971 
3972 		if (width < byte_stride) {
3973 			for (k = 0; k < (byte_stride - width); k++)
3974 				sbuf_printf(sb, "   ");
3975 		}
3976 
3977 		if (!text) {
3978 			sbuf_printf(sb, "\n");
3979 			continue;
3980 		}
3981 
3982 		for (j = 0; j < width; j++) {
3983 			c = (char)buf[i * byte_stride + j];
3984 			if (c < 32 || c > 126)
3985 				sbuf_printf(sb, ".");
3986 			else
3987 				sbuf_printf(sb, "%c", c);
3988 
3989 			if (j == width - 1)
3990 				sbuf_printf(sb, "\n");
3991 		}
3992 	}
3993 }
3994 
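/*
 * Dump the RSS lookup table (LUT); as with the hash key above, X722 uses
 * an AQ command while other MACs read the I40E_PFQF_HLUT registers.
 */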
3995 static int
3996 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3997 {
3998 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3999 	struct i40e_hw *hw = &pf->hw;
4000 	device_t dev = pf->dev;
4001 	struct sbuf *buf;
4002 	int error = 0;
4003 	enum i40e_status_code status;
4004 	u8 hlut[512];
4005 	u32 reg;
4006 
4007 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4008 	if (!buf) {
4009 		device_printf(dev, "Could not allocate sbuf for output.\n");
4010 		return (ENOMEM);
4011 	}
4012 
4013 	bzero(hlut, sizeof(hlut));
4014 	sbuf_cat(buf, "\n");
4015 	if (hw->mac.type == I40E_MAC_X722) {
4016 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4017 		if (status)
4018 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4019 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4020 	} else {
4021 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4022 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4023 			bcopy(&reg, &hlut[i << 2], 4);
4024 		}
4025 	}
4026 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4027 
4028 	error = sbuf_finish(buf);
4029 	if (error)
4030 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4031 	sbuf_delete(buf);
4032 
4033 	return (error);
4034 }
4035 
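/*
 * Report the 64-bit hash-enable (HENA) bitmask, which selects the packet
 * types RSS hashes on; it is split across two 32-bit registers.
 */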
4036 static int
4037 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4038 {
4039 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4040 	struct i40e_hw *hw = &pf->hw;
4041 	u64 hena;
4042 
4043 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4044 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4045 
4046 	return (sysctl_handle_long(oidp, NULL, hena, req));
4047 }
4048 
4049 /*
4050  * Sysctl to disable firmware's link management
4051  *
4052  * 1 - Disable link management on this port
4053  * 0 - Re-enable link management
4054  *
4055  * On normal NVMs, firmware manages link by default.
4056  */
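/*
 * For example, to hand link control to the driver on port 0 (node name is
 * illustrative and assumes the default sysctl tree layout):
 *
 *	# sysctl dev.ixl.0.debug.disable_fw_link_management=1
 */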
4057 static int
4058 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4059 {
4060 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4061 	struct i40e_hw *hw = &pf->hw;
4062 	device_t dev = pf->dev;
4063 	int requested_mode = -1;
4064 	enum i40e_status_code status = 0;
4065 	int error = 0;
4066 
4067 	/* Read in new mode */
4068 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4069 	if ((error) || (req->newptr == NULL))
4070 		return (error);
4071 	/* Check for sane value */
4072 	if (requested_mode < 0 || requested_mode > 1) {
4073 		device_printf(dev, "Valid modes are 0 or 1\n");
4074 		return (EINVAL);
4075 	}
4076 
4077 	/* Set new mode */
4078 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4079 	if (status) {
4080 		device_printf(dev,
4081 		    "%s: Error setting new phy debug mode %s,"
4082 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4083 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4084 		return (EIO);
4085 	}
4086 
4087 	return (0);
4088 }
4089 
4090 /*
4091  * Read some diagnostic data from a (Q)SFP+ module
4092  *
4093  *             SFP A2   QSFP Lower Page
4094  * Temperature 96-97	22-23
4095  * Vcc         98-99    26-27
4096  * TX power    102-103  34-35..40-41
4097  * RX power    104-105  50-51..56-57
4098  */
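/*
 * Per SFF-8472, the two temperature bytes form a signed 16-bit value in
 * units of 1/256 degC, so an illustrative decode of the dumped bytes is:
 *
 *	s16 raw = (s16)((msb << 8) | lsb);
 *	int mdeg = raw * 1000 / 256;	// milli-degrees Celsius
 */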
4099 static int
4100 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4101 {
4102 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4103 	device_t dev = pf->dev;
4104 	struct sbuf *sbuf;
4105 	int error = 0;
4106 	u8 output;
4107 
4108 	if (req->oldptr == NULL) {
4109 		error = SYSCTL_OUT(req, NULL, 128);
4110 		return (error);
4111 	}
4112 
4113 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4114 	if (error) {
4115 		device_printf(dev, "Error reading from i2c\n");
4116 		return (error);
4117 	}
4118 
4119 	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4120 	if (output == 0x3) {
4121 		/*
4122 		 * Check for:
4123 		 * - Internally calibrated data
4124 		 * - Diagnostic monitoring is implemented
4125 		 */
4126 		pf->read_i2c_byte(pf, 92, 0xA0, &output);
4127 		if (!(output & 0x60)) {
4128 			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4129 			return (0);
4130 		}
4131 
4132 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sbuf == NULL) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}
4133 
4134 		for (u8 offset = 96; offset < 100; offset++) {
4135 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4136 			sbuf_printf(sbuf, "%02X ", output);
4137 		}
4138 		for (u8 offset = 102; offset < 106; offset++) {
4139 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4140 			sbuf_printf(sbuf, "%02X ", output);
4141 		}
4142 	} else if (output == 0xD || output == 0x11) {
4143 		/*
4144 		 * QSFP+ modules are always internally calibrated, and must indicate
4145 		 * what types of diagnostic monitoring are implemented
4146 		 */
4147 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sbuf == NULL) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}
4148 
4149 		for (u8 offset = 22; offset < 24; offset++) {
4150 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4151 			sbuf_printf(sbuf, "%02X ", output);
4152 		}
4153 		for (u8 offset = 26; offset < 28; offset++) {
4154 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4155 			sbuf_printf(sbuf, "%02X ", output);
4156 		}
4157 		/* Read the data from the first lane */
4158 		for (u8 offset = 34; offset < 36; offset++) {
4159 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4160 			sbuf_printf(sbuf, "%02X ", output);
4161 		}
4162 		for (u8 offset = 50; offset < 52; offset++) {
4163 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4164 			sbuf_printf(sbuf, "%02X ", output);
4165 		}
4166 	} else {
4167 		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4168 		return (0);
4169 	}
4170 
4171 	sbuf_finish(sbuf);
4172 	sbuf_delete(sbuf);
4173 
4174 	return (0);
4175 }
4176 
4177 /*
4178  * Sysctl to read a byte from I2C bus.
4179  *
4180  * Input: 32-bit value:
4181  * 	bits 0-7:   device address (0xA0 or 0xA2)
4182  * 	bits 8-15:  offset (0-255)
4183  *	bits 16-31: unused
4184  * Output: 8-bit value read
4185  */
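/*
 * For example, reading offset 96 (0x60) from device address 0xA2 would use
 * input (0x60 << 8) | 0xA2 = 0x60A2 (sysctl name illustrative):
 *
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 */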
4186 static int
4187 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4188 {
4189 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4190 	device_t dev = pf->dev;
4191 	int input = -1, error = 0;
4192 	u8 dev_addr, offset, output;
4193 
4194 	/* Read in I2C read parameters */
4195 	error = sysctl_handle_int(oidp, &input, 0, req);
4196 	if ((error) || (req->newptr == NULL))
4197 		return (error);
4198 	/* Validate device address */
4199 	dev_addr = input & 0xFF;
4200 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4201 		return (EINVAL);
4202 	}
4203 	offset = (input >> 8) & 0xFF;
4204 
4205 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4206 	if (error)
4207 		return (error);
4208 
4209 	device_printf(dev, "%02X\n", output);
4210 	return (0);
4211 }
4212 
4213 /*
4214  * Sysctl to write a byte to the I2C bus.
4215  *
4216  * Input: 32-bit value:
4217  * 	bits 0-7:   device address (0xA0 or 0xA2)
4218  * 	bits 8-15:  offset (0-255)
4219  *	bits 16-23: value to write
4220  *	bits 24-31: unused
4221  * Output: 8-bit value written
4222  */
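/*
 * For example, writing 0x5A to offset 0x7F of device address 0xA0 would
 * use input (0x5A << 16) | (0x7F << 8) | 0xA0 = 0x5A7FA0:
 *
 *	# sysctl dev.ixl.0.debug.write_i2c_byte=0x5A7FA0
 */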
4223 static int
4224 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4225 {
4226 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4227 	device_t dev = pf->dev;
4228 	int input = -1, error = 0;
4229 	u8 dev_addr, offset, value;
4230 
4231 	/* Read in I2C write parameters */
4232 	error = sysctl_handle_int(oidp, &input, 0, req);
4233 	if ((error) || (req->newptr == NULL))
4234 		return (error);
4235 	/* Validate device address */
4236 	dev_addr = input & 0xFF;
4237 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4238 		return (EINVAL);
4239 	}
4240 	offset = (input >> 8) & 0xFF;
4241 	value = (input >> 16) & 0xFF;
4242 
4243 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4244 	if (error)
4245 		return (error);
4246 
4247 	device_printf(dev, "%02X written\n", value);
4248 	return (0);
4249 }
4250 
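/*
 * Helpers for the FEC sysctls below: read the PHY abilities and report
 * whether the given bit is set in fec_cfg_curr_mod_ext_info, or write a
 * new PHY config with that bit updated (skipping the AQ call when nothing
 * would change).
 */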
4251 static int
4252 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4253     u8 bit_pos, int *is_set)
4254 {
4255 	device_t dev = pf->dev;
4256 	struct i40e_hw *hw = &pf->hw;
4257 	enum i40e_status_code status;
4258 
4259 	if (IXL_PF_IN_RECOVERY_MODE(pf))
4260 		return (EIO);
4261 
4262 	status = i40e_aq_get_phy_capabilities(hw,
4263 	    FALSE, FALSE, abilities, NULL);
4264 	if (status) {
4265 		device_printf(dev,
4266 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4267 		    __func__, i40e_stat_str(hw, status),
4268 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4269 		return (EIO);
4270 	}
4271 
4272 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4273 	return (0);
4274 }
4275 
4276 static int
4277 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4278     u8 bit_pos, int set)
4279 {
4280 	device_t dev = pf->dev;
4281 	struct i40e_hw *hw = &pf->hw;
4282 	struct i40e_aq_set_phy_config config;
4283 	enum i40e_status_code status;
4284 
4285 	/* Set new PHY config */
4286 	memset(&config, 0, sizeof(config));
4287 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4288 	if (set)
4289 		config.fec_config |= bit_pos;
4290 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4291 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4292 		config.phy_type = abilities->phy_type;
4293 		config.phy_type_ext = abilities->phy_type_ext;
4294 		config.link_speed = abilities->link_speed;
4295 		config.eee_capability = abilities->eee_capability;
4296 		config.eeer = abilities->eeer_val;
4297 		config.low_power_ctrl = abilities->d3_lpan;
4298 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4299 
4300 		if (status) {
4301 			device_printf(dev,
4302 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4303 			    __func__, i40e_stat_str(hw, status),
4304 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4305 			return (EIO);
4306 		}
4307 	}
4308 
4309 	return (0);
4310 }
4311 
4312 static int
4313 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4314 {
4315 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4316 	int mode, error = 0;
4317 
4318 	struct i40e_aq_get_phy_abilities_resp abilities;
4319 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4320 	if (error)
4321 		return (error);
4322 	/* Read in new mode */
4323 	error = sysctl_handle_int(oidp, &mode, 0, req);
4324 	if ((error) || (req->newptr == NULL))
4325 		return (error);
4326 
4327 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4328 }
4329 
4330 static int
4331 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4332 {
4333 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4334 	int mode, error = 0;
4335 
4336 	struct i40e_aq_get_phy_abilities_resp abilities;
4337 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4338 	if (error)
4339 		return (error);
4340 	/* Read in new mode */
4341 	error = sysctl_handle_int(oidp, &mode, 0, req);
4342 	if ((error) || (req->newptr == NULL))
4343 		return (error);
4344 
4345 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4346 }
4347 
4348 static int
4349 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4350 {
4351 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4352 	int mode, error = 0;
4353 
4354 	struct i40e_aq_get_phy_abilities_resp abilities;
4355 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4356 	if (error)
4357 		return (error);
4358 	/* Read in new mode */
4359 	error = sysctl_handle_int(oidp, &mode, 0, req);
4360 	if ((error) || (req->newptr == NULL))
4361 		return (error);
4362 
4363 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4364 }
4365 
4366 static int
4367 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4368 {
4369 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4370 	int mode, error = 0;
4371 
4372 	struct i40e_aq_get_phy_abilities_resp abilities;
4373 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4374 	if (error)
4375 		return (error);
4376 	/* Read in new mode */
4377 	error = sysctl_handle_int(oidp, &mode, 0, req);
4378 	if ((error) || (req->newptr == NULL))
4379 		return (error);
4380 
4381 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4382 }
4383 
4384 static int
4385 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4386 {
4387 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4388 	int mode, error = 0;
4389 
4390 	struct i40e_aq_get_phy_abilities_resp abilities;
4391 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4392 	if (error)
4393 		return (error);
4394 	/* Read in new mode */
4395 	error = sysctl_handle_int(oidp, &mode, 0, req);
4396 	if ((error) || (req->newptr == NULL))
4397 		return (error);
4398 
4399 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4400 }
4401 
4402 static int
4403 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4404 {
4405 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4406 	struct i40e_hw *hw = &pf->hw;
4407 	device_t dev = pf->dev;
4408 	struct sbuf *buf;
4409 	int error = 0;
4410 	enum i40e_status_code status;
4411 
4412 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4413 	if (!buf) {
4414 		device_printf(dev, "Could not allocate sbuf for output.\n");
4415 		return (ENOMEM);
4416 	}
4417 
4418 	u8 *final_buff;
4419 	/* This amount is only necessary if reading the entire cluster into memory */
4420 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4421 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4422 	if (final_buff == NULL) {
4423 		device_printf(dev, "Could not allocate memory for output.\n");
4424 		sbuf_delete(buf);
		return (ENOMEM);
4425 	}
4426 	int final_buff_len = 0;
4427 
4428 	u8 cluster_id = 1;
4429 	bool more = true;
4430 
4431 	u8 dump_buf[4096];
4432 	u16 curr_buff_size = 4096;
4433 	u8 curr_next_table = 0;
4434 	u32 curr_next_index = 0;
4435 
4436 	u16 ret_buff_size;
4437 	u8 ret_next_table;
4438 	u32 ret_next_index;
4439 
4440 	sbuf_cat(buf, "\n");
4441 
4442 	while (more) {
4443 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4444 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4445 		if (status) {
4446 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4447 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4448 			goto free_out;
4449 		}
4450 
4451 		/* copy info out of temp buffer */
4452 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4453 		final_buff_len += ret_buff_size;
4454 
4455 		if (ret_next_table != curr_next_table) {
4456 			/* We're done with the current table; we can dump out read data. */
4457 			sbuf_printf(buf, "%d:", curr_next_table);
4458 			int bytes_printed = 0;
4459 			/* Use <, not <=, to avoid dumping past the copied data */
			while (bytes_printed < final_buff_len) {
4460 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4461 				bytes_printed += 16;
4462 			}
4463 			sbuf_cat(buf, "\n");
4464 
4465 			/* The entire cluster has been read; we're finished */
4466 			if (ret_next_table == 0xFF)
4467 				break;
4468 
4469 			/* Otherwise clear the output buffer and continue reading */
4470 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4471 			final_buff_len = 0;
4472 		}
4473 
4474 		if (ret_next_index == 0xFFFFFFFF)
4475 			ret_next_index = 0;
4476 
4477 		bzero(dump_buf, sizeof(dump_buf));
4478 		curr_next_table = ret_next_table;
4479 		curr_next_index = ret_next_index;
4480 	}
4481 
4482 free_out:
4483 	free(final_buff, M_IXL);
4485 	error = sbuf_finish(buf);
4486 	if (error)
4487 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4488 	sbuf_delete(buf);
4489 
4490 	return (error);
4491 }
4492 
4493 static int
4494 ixl_start_fw_lldp(struct ixl_pf *pf)
4495 {
4496 	struct i40e_hw *hw = &pf->hw;
4497 	enum i40e_status_code status;
4498 
4499 	status = i40e_aq_start_lldp(hw, false, NULL);
4500 	if (status != I40E_SUCCESS) {
4501 		switch (hw->aq.asq_last_status) {
4502 		case I40E_AQ_RC_EEXIST:
4503 			device_printf(pf->dev,
4504 			    "FW LLDP agent is already running\n");
4505 			break;
4506 		case I40E_AQ_RC_EPERM:
4507 			device_printf(pf->dev,
4508 			    "Device configuration forbids SW from starting "
4509 			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4510 			    "attribute to \"Enabled\" to use this sysctl\n");
4511 			return (EINVAL);
4512 		default:
4513 			device_printf(pf->dev,
4514 			    "Starting FW LLDP agent failed: error: %s, %s\n",
4515 			    i40e_stat_str(hw, status),
4516 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4517 			return (EINVAL);
4518 		}
4519 	}
4520 
4521 	ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4522 	return (0);
4523 }
4524 
4525 static int
4526 ixl_stop_fw_lldp(struct ixl_pf *pf)
4527 {
4528 	struct i40e_hw *hw = &pf->hw;
4529 	device_t dev = pf->dev;
4530 	enum i40e_status_code status;
4531 
4532 	if (hw->func_caps.npar_enable != 0) {
4533 		device_printf(dev,
4534 		    "Disabling FW LLDP agent is not supported on this device\n");
4535 		return (EINVAL);
4536 	}
4537 
4538 	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4539 		device_printf(dev,
4540 		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4541 		return (EINVAL);
4542 	}
4543 
4544 	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4545 	if (status != I40E_SUCCESS) {
4546 		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4547 			device_printf(dev,
4548 			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4549 			    i40e_stat_str(hw, status),
4550 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4551 			return (EINVAL);
4552 		}
4553 
4554 		device_printf(dev, "FW LLDP agent is already stopped\n");
4555 	}
4556 
4557 	i40e_aq_set_dcb_parameters(hw, true, NULL);
4558 	ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4559 	return (0);
4560 }
4561 
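/*
 * Sysctl wrapper for the FW LLDP agent:
 *
 * 1 - Start the FW LLDP agent
 * 0 - Stop it
 */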
4562 static int
4563 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4564 {
4565 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4566 	int state, new_state, error = 0;
4567 
4568 	state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4569 
4570 	/* Read in new mode */
4571 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4572 	if ((error) || (req->newptr == NULL))
4573 		return (error);
4574 
4575 	/* Already in requested state */
4576 	if (new_state == state)
4577 		return (error);
4578 
4579 	if (new_state == 0)
4580 		return ixl_stop_fw_lldp(pf);
4581 
4582 	return ixl_start_fw_lldp(pf);
4583 }
4584 
4585 static int
4586 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4587 {
4588 	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4589 	int                   state, new_state;
4590 	int                   sysctl_handle_status = 0;
4591 	enum i40e_status_code cmd_status;
4592 
4593 	/* Init states' values */
4594 	state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4595 
4596 	/* Get requested mode */
4597 	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4598 	if ((sysctl_handle_status) || (req->newptr == NULL))
4599 		return (sysctl_handle_status);
4600 
4601 	/* Check if state has changed */
4602 	if (new_state == state)
4603 		return (0);
4604 
4605 	/* Set new state */
4606 	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4607 
4608 	/* Save new state or report error */
4609 	if (!cmd_status) {
4610 		if (new_state == 0)
4611 			ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4612 		else
4613 			ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4614 	} else if (cmd_status == I40E_ERR_CONFIG)
4615 		return (EPERM);
4616 	else
4617 		return (EIO);
4618 
4619 	return (0);
4620 }
4621 
4622 static int
4623 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4624 {
4625 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4626 	int error, state;
4627 
4628 	state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4629 
4630 	error = sysctl_handle_int(oidp, &state, 0, req);
4631 	if ((error) || (req->newptr == NULL))
4632 		return (error);
4633 
4634 	if (state == 0)
4635 		ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4636 	else
4637 		ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4638 
4639 	return (0);
4640 }
4641 
4642 
4643 int
4644 ixl_attach_get_link_status(struct ixl_pf *pf)
4645 {
4646 	struct i40e_hw *hw = &pf->hw;
4647 	device_t dev = pf->dev;
4648 	int error = 0;
4649 
4650 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4651 	    (hw->aq.fw_maj_ver < 4)) {
4652 		i40e_msec_delay(75);
4653 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4654 		if (error) {
4655 			device_printf(dev, "link restart failed, aq_err=%d\n",
4656 			    pf->hw.aq.asq_last_status);
4657 			return (error);
4658 		}
4659 	}
4660 
4661 	/* Determine link state */
4662 	hw->phy.get_link_info = TRUE;
4663 	i40e_get_link_status(hw, &pf->link_up);
4664 
4665 	/* If flow control mode was not set by the user, use the current FW setting */
4666 	if (pf->fc == -1)
4667 		pf->fc = hw->fc.current_mode;
4668 
4669 	return (0);
4670 }
4671 
4672 static int
4673 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4674 {
4675 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4676 	int requested = 0, error = 0;
4677 
4678 	/* Read in new mode */
4679 	error = sysctl_handle_int(oidp, &requested, 0, req);
4680 	if ((error) || (req->newptr == NULL))
4681 		return (error);
4682 
4683 	/* Initiate the PF reset later in the admin task */
4684 	ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4685 
4686 	return (error);
4687 }
4688 
4689 static int
4690 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4691 {
4692 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4693 	struct i40e_hw *hw = &pf->hw;
4694 	int requested = 0, error = 0;
4695 
4696 	/* Read in new mode */
4697 	error = sysctl_handle_int(oidp, &requested, 0, req);
4698 	if ((error) || (req->newptr == NULL))
4699 		return (error);
4700 
4701 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4702 
4703 	return (error);
4704 }
4705 
4706 static int
4707 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4708 {
4709 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4710 	struct i40e_hw *hw = &pf->hw;
4711 	int requested = 0, error = 0;
4712 
4713 	/* Read in new mode */
4714 	error = sysctl_handle_int(oidp, &requested, 0, req);
4715 	if ((error) || (req->newptr == NULL))
4716 		return (error);
4717 
4718 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4719 
4720 	return (error);
4721 }
4722 
4723 /*
4724  * Print out mapping of TX queue indexes and Rx queue indexes
4725  * to MSI-X vectors.
4726  */
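/*
 * Output is one line per queue, e.g. (illustrative):
 *
 *	(rxq   0): 1
 *	(txq   0): 1
 */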
4727 static int
4728 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4729 {
4730 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4731 	struct ixl_vsi *vsi = &pf->vsi;
4732 	device_t dev = pf->dev;
4733 	struct sbuf *buf;
4734 	int error = 0;
4735 
4736 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
4737 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
4738 
4739 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4740 	if (!buf) {
4741 		device_printf(dev, "Could not allocate sbuf for output.\n");
4742 		return (ENOMEM);
4743 	}
4744 
4745 	sbuf_cat(buf, "\n");
4746 	for (int i = 0; i < vsi->num_rx_queues; i++) {
4747 		rx_que = &vsi->rx_queues[i];
4748 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4749 	}
4750 	for (int i = 0; i < vsi->num_tx_queues; i++) {
4751 		tx_que = &vsi->tx_queues[i];
4752 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4753 	}
4754 
4755 	error = sbuf_finish(buf);
4756 	if (error)
4757 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4758 	sbuf_delete(buf);
4759 
4760 	return (error);
4761 }
4762