/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

/* Functions for setting and checking driver state. Note that the functions
 * take bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
 * operations require bitmasks; this mismatch can easily lead to programming
 * errors, so we provide wrapper functions to avoid them.
 */
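
/*
 * Illustrative usage of these wrappers (a sketch, not part of the driver's
 * control flow; IXL_STATE_RECOVERY_MODE is just one example state used
 * elsewhere in this file):
 *
 *	ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 *	if (ixl_test_state(&pf->state, IXL_STATE_RECOVERY_MODE))
 *		...;
 *
 * Passing the bit position straight to atomic_set_32() would be a bug;
 * the raw atomics expect a bitmask such as BIT(IXL_STATE_RECOVERY_MODE).
 */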

/**
 * ixl_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
inline void
ixl_set_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ixl_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
inline void
ixl_clear_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ixl_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ixl_testandset_state.
 */
inline bool
ixl_test_state(volatile u32 *s, enum ixl_state bit)
{
	return !!(*s & BIT(bit));
}

/**
 * ixl_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
inline u32
ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
{
	/*
	 * atomic_testandset_32 expects a bit position, as opposed to the
	 * bitmask expected by the other atomic functions.
	 */
	return atomic_testandset_32(s, bit);
}

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}
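
/*
 * Example of the resulting string (illustrative values only):
 *
 *	fw 6.0.48442 api 1.7 nvm 6.01 etid 80002e0a oem 1.262.0
 */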

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of the FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of the expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
	    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
		fw_mode = IXL_FW_MODE_UEMPR;

	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/*
	 * Check FW mode again in case it has changed while
	 * waiting for reset to complete.
	 */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 *
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}
	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
			      __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/*
** Creates a new filter with the given MAC address and VLAN ID
*/
static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter  *f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_IXL, M_NOWAIT | M_ZERO);
	if (f) {
		LIST_INSERT_HEAD(headp, f, ftle);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
	}

	return (f);
}

/**
 * ixl_free_filters - Free all filters in given list
 * @headp: pointer to list head
 *
 * Frees memory used by each entry in the list.
 * Does not remove filters from HW.
 */
void
ixl_free_filters(struct ixl_ftl_head *headp)
{
	struct ixl_mac_filter *f, *nf;

	f = LIST_FIRST(headp);
	while (f != NULL) {
		nf = LIST_NEXT(f, ftle);
		free(f, M_IXL);
		f = nf;
	}

	LIST_INIT(headp);
}

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_add_maddr_arg *ama = arg;
	struct ixl_vsi *vsi = ama->vsi;
	const u8 *macaddr = (u8*)LLADDR(sdl);
	struct ixl_mac_filter *f;

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return (0);

	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
	if (f == NULL) {
		device_printf(vsi->dev, "WARNING: no filter available!!\n");
		return (0);
	}
	f->flags |= IXL_FILTER_MC;

	return (1);
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/

/**
 * ixl_add_multi - Add multicast filters to the hardware
 * @vsi: The VSI structure
 *
 * If the number of multicast filters in the IFP exceeds 127 entries,
 * multicast promiscuous mode will be enabled and the filters will be removed
 * from the hardware.
 */
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	if_t			ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0;
	struct ixl_add_maddr_arg cb_arg;
	enum i40e_status_code	status;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
		    TRUE, NULL);
		if (status != I40E_SUCCESS) {
			if_printf(ifp, "Failed to enable multicast promiscuous "
			    "mode, status: %s\n", i40e_stat_str(hw, status));
		} else {
			if_printf(ifp, "Enabled multicast promiscuous mode\n");
		}
		/* Delete all existing MC filters */
		ixl_del_multi(vsi, true);
		return;
	}

	cb_arg.vsi = vsi;
	LIST_INIT(&cb_arg.to_add);

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
	if (mcnt > 0)
		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

/**
 * ixl_dis_multi_promisc - Disable multicast promiscuous mode
 * @vsi: The VSI structure
 * @vsi_mcnt: Number of multicast filters in the VSI
 *
 * Disable multicast promiscuous mode based on the number of entries in the
 * IFP and the VSI, then re-add the multicast filters.
 */
static void
ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
{
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			ifp_mcnt = 0;
	enum i40e_status_code	status;

	ifp_mcnt = if_llmaddr_count(ifp);
	/*
	 * Equal lists or an empty ifp list mean the filter list has not
	 * changed; in that case avoid disabling multicast promiscuous mode,
	 * as it was not previously enabled (the mode is enabled when
	 * vsi_mcnt == 0 && ifp_mcnt > 0). It must also stay enabled while
	 * ifp_mcnt >= MAX_MULTICAST_ADDR.
	 */
	if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
	    ifp_mcnt >= MAX_MULTICAST_ADDR)
		return;

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
	    FALSE, NULL);
	if (status != I40E_SUCCESS) {
		if_printf(ifp, "Failed to disable multicast promiscuous "
		    "mode, status: %s\n", i40e_stat_str(hw, status));

		return;
	}

	if_printf(ifp, "Disabled multicast promiscuous mode\n");

	ixl_add_multi(vsi);
}

/**
 * ixl_del_multi - Delete multicast filters from the hardware
 * @vsi: The VSI structure
 * @all: Bool to determine if all the multicast filters should be removed
 *
 * If the number of multicast filters in the IFP drops to 127 entries or
 * fewer, multicast promiscuous mode will be disabled and the filters will be
 * reapplied to the hardware.
 */
void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
	int			to_del_cnt = 0, vsi_mcnt = 0;
	if_t			ifp = vsi->ifp;
	struct ixl_mac_filter	*f, *fn;
	struct ixl_ftl_head	to_del;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	LIST_INIT(&to_del);
	/* Search for removed multicast addresses */
	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
		if ((f->flags & IXL_FILTER_MC) == 0)
			continue;

		/* Count all the multicast filters in the VSI for comparison */
		vsi_mcnt++;

		if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	if (to_del_cnt > 0) {
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
		return;
	}

	ixl_dis_multi_promisc(vsi, vsi_mcnt);

	IOCTL_DEBUGOUT("ixl_del_multi: end");
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    if_name(ifp),
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}
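
/*
 * Example of the resulting log line (illustrative values only; the speed
 * string comes from ixl_link_speed_string()):
 *
 *	ixl0: Link is up, 40 Gbps Full Duplex, Requested FEC: None,
 *	Negotiated FEC: None, Autoneg: True, Flow Control: None
 */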

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t 	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
			CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
			CTLFLAG_RD, &vsi->ierrors,
			"RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}
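
/*
 * Illustrative use from userland, assuming the handler is attached to the
 * device sysctl tree as "tx_itr" (the node name is defined where the sysctl
 * is created, not here):
 *
 *	# sysctl dev.ixl.0.tx_itr=122
 */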

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
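		/*
		 * 52 bytes is the size of the X722 AQ RSS key buffer:
		 * a 40-byte standard key plus a 12-byte extended hash key.
		 */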
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64		set_hena = 0, hena;

#ifdef RSS
	u32		rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
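	/*
	 * HENA is a 64-bit enable mask split across two 32-bit registers:
	 * read both halves, OR in the new PCTYPE bits, and write them back.
	 */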
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such vsi already contains
	 * IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}

/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;
	struct ixl_ftl_head	to_add;
	int			to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** If this is the first vlan being registered, we need to
	** remove the ANY filter that indicates we are not in a vlan,
	** and replace it with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

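	/*
	 * Walk the set bits in vlans_map: bit_ffs_at() stores in 'vlan' the
	 * position of the first bit set at or after index i, or -1 when no
	 * more bits are set.
	 */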
	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}

/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY),
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f, *fn;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; /* a pox on fvl long names :) */
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		/* Some FW versions do not set the match method when adding
		 * filters fails. Initialize it with the expected error value
		 * to allow detecting which filters were not added. */
		b->match_method = I40E_AQC_MM_ERR_NO_RES;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; /* a pox on fvl long names :) */
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: filter list has wrong size; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
1759 
1760 /*
1761  * Returns an error if the TX ring is still enabled after the disable times out.
1762  */
1763 int
1764 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1765 {
1766 	struct i40e_hw	*hw = &pf->hw;
1767 	int		error = 0;
1768 	u32		reg;
1769 	u16		pf_qidx;
1770 
1771 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1772 
1773 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1774 	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1775 	    pf_qidx, vsi_qidx);
1776 
1777 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1778 	i40e_usec_delay(500);
1779 
1780 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1781 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1782 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1783 	/* Verify the disable took */
1784 	for (int j = 0; j < 10; j++) {
1785 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1786 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1787 			break;
1788 		i40e_msec_delay(10);
1789 	}
1790 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1791 		device_printf(pf->dev, "TX queue %d still enabled!\n",
1792 		    pf_qidx);
1793 		error = ETIMEDOUT;
1794 	}
1795 
1796 	return (error);
1797 }
1798 
1799 /*
1800  * Returns an error if the RX ring is still enabled after the disable times out.
1801  */
1802 int
1803 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1804 {
1805 	struct i40e_hw	*hw = &pf->hw;
1806 	int		error = 0;
1807 	u32		reg;
1808 	u16		pf_qidx;
1809 
1810 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1811 
1812 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1813 	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1814 	    pf_qidx, vsi_qidx);
1815 
1816 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1817 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1818 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1819 	/* Verify the disable took */
1820 	for (int j = 0; j < 10; j++) {
1821 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1822 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1823 			break;
1824 		i40e_msec_delay(10);
1825 	}
1826 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1827 		device_printf(pf->dev, "RX queue %d still enabled!\n",
1828 		    pf_qidx);
1829 		error = ETIMEDOUT;
1830 	}
1831 
1832 	return (error);
1833 }
1834 
1835 int
1836 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1837 {
1838 	int error = 0;
1839 
1840 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1841 	/* Called function already prints error message */
1842 	if (error)
1843 		return (error);
1844 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1845 	return (error);
1846 }
1847 
1848 static void
1849 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1850 {
1851 	struct i40e_hw *hw = &pf->hw;
1852 	device_t dev = pf->dev;
1853 	struct ixl_vf *vf;
1854 	bool mdd_detected = false;
1855 	bool pf_mdd_detected = false;
1856 	bool vf_mdd_detected = false;
1857 	u16 vf_num, queue;
1858 	u8 pf_num, event;
1859 	u8 pf_mdet_num, vp_mdet_num;
1860 	u32 reg;
1861 
1862 	/* find what triggered the MDD event */
1863 	reg = rd32(hw, I40E_GL_MDET_TX);
1864 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1865 		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1866 		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
1867 		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1868 		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
1869 		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1870 		    I40E_GL_MDET_TX_EVENT_SHIFT;
1871 		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1872 		    I40E_GL_MDET_TX_QUEUE_SHIFT;
1873 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1874 		mdd_detected = true;
1875 	}
1876 
1877 	if (!mdd_detected)
1878 		return;
1879 
1880 	reg = rd32(hw, I40E_PF_MDET_TX);
1881 	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1882 		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1883 		pf_mdet_num = hw->pf_id;
1884 		pf_mdd_detected = true;
1885 	}
1886 
1887 	/* Check if MDD was caused by a VF */
1888 	for (int i = 0; i < pf->num_vfs; i++) {
1889 		vf = &(pf->vfs[i]);
1890 		reg = rd32(hw, I40E_VP_MDET_TX(i));
1891 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1892 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1893 			vp_mdet_num = i;
1894 			vf->num_mdd_events++;
1895 			vf_mdd_detected = true;
1896 		}
1897 	}
1898 
1899 	/* Print out an error message */
1900 	if (vf_mdd_detected && pf_mdd_detected)
1901 		device_printf(dev,
1902 		    "Malicious Driver Detection event %d"
1903 		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1904 		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1905 	else if (vf_mdd_detected && !pf_mdd_detected)
1906 		device_printf(dev,
1907 		    "Malicious Driver Detection event %d"
1908 		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1909 		    event, queue, pf_num, vf_num, vp_mdet_num);
1910 	else if (!vf_mdd_detected && pf_mdd_detected)
1911 		device_printf(dev,
1912 		    "Malicious Driver Detection event %d"
1913 		    " on TX queue %d, pf number %d (PF-%d)\n",
1914 		    event, queue, pf_num, pf_mdet_num);
1915 	/* Theoretically shouldn't happen */
1916 	else
1917 		device_printf(dev,
1918 		    "TX Malicious Driver Detection event (unknown)\n");
1919 }
1920 
1921 static void
1922 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1923 {
1924 	struct i40e_hw *hw = &pf->hw;
1925 	device_t dev = pf->dev;
1926 	struct ixl_vf *vf;
1927 	bool mdd_detected = false;
1928 	bool pf_mdd_detected = false;
1929 	bool vf_mdd_detected = false;
1930 	u16 queue;
1931 	u8 pf_num, event;
1932 	u8 pf_mdet_num, vp_mdet_num;
1933 	u32 reg;
1934 
1935 	/*
1936 	 * GL_MDET_RX doesn't contain VF number information, unlike
1937 	 * GL_MDET_TX.
1938 	 */
1939 	reg = rd32(hw, I40E_GL_MDET_RX);
1940 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1941 		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1942 		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
1943 		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1944 		    I40E_GL_MDET_RX_EVENT_SHIFT;
1945 		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1946 		    I40E_GL_MDET_RX_QUEUE_SHIFT;
1947 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1948 		mdd_detected = true;
1949 	}
1950 
1951 	if (!mdd_detected)
1952 		return;
1953 
1954 	reg = rd32(hw, I40E_PF_MDET_RX);
1955 	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1956 		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1957 		pf_mdet_num = hw->pf_id;
1958 		pf_mdd_detected = true;
1959 	}
1960 
1961 	/* Check if MDD was caused by a VF */
1962 	for (int i = 0; i < pf->num_vfs; i++) {
1963 		vf = &(pf->vfs[i]);
1964 		reg = rd32(hw, I40E_VP_MDET_RX(i));
1965 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1966 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1967 			vp_mdet_num = i;
1968 			vf->num_mdd_events++;
1969 			vf_mdd_detected = true;
1970 		}
1971 	}
1972 
1973 	/* Print out an error message */
1974 	if (vf_mdd_detected && pf_mdd_detected)
1975 		device_printf(dev,
1976 		    "Malicious Driver Detection event %d"
1977 		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1978 		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1979 	else if (vf_mdd_detected && !pf_mdd_detected)
1980 		device_printf(dev,
1981 		    "Malicious Driver Detection event %d"
1982 		    " on RX queue %d, pf number %d, (VF-%d)\n",
1983 		    event, queue, pf_num, vp_mdet_num);
1984 	else if (!vf_mdd_detected && pf_mdd_detected)
1985 		device_printf(dev,
1986 		    "Malicious Driver Detection event %d"
1987 		    " on RX queue %d, pf number %d (PF-%d)\n",
1988 		    event, queue, pf_num, pf_mdet_num);
1989 	/* Theoretically shouldn't happen */
1990 	else
1991 		device_printf(dev,
1992 		    "RX Malicious Driver Detection event (unknown)\n");
1993 }
1994 
1995 /**
1996  * ixl_handle_mdd_event
1997  *
1998  * Called from the interrupt handler to identify possibly malicious VFs
1999  * (it also detects MDD events caused by the PF itself)
2000  **/
2001 void
2002 ixl_handle_mdd_event(struct ixl_pf *pf)
2003 {
2004 	struct i40e_hw *hw = &pf->hw;
2005 	u32 reg;
2006 
2007 	/*
2008 	 * Handle both TX/RX because it's possible they could
2009 	 * both trigger in the same interrupt.
2010 	 */
2011 	ixl_handle_tx_mdd_event(pf);
2012 	ixl_handle_rx_mdd_event(pf);
2013 
2014 	ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
2015 
2016 	/* re-enable mdd interrupt cause */
2017 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2018 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2019 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2020 	ixl_flush(hw);
2021 }
2022 
2023 void
2024 ixl_enable_intr0(struct i40e_hw *hw)
2025 {
2026 	u32		reg;
2027 
2028 	/* Use IXL_ITR_NONE so ITR isn't updated here */
2029 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2030 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2031 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2032 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2033 }
2034 
2035 void
2036 ixl_disable_intr0(struct i40e_hw *hw)
2037 {
2038 	u32		reg;
2039 
2040 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2041 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2042 	ixl_flush(hw);
2043 }
2044 
2045 void
2046 ixl_enable_queue(struct i40e_hw *hw, int id)
2047 {
2048 	u32		reg;
2049 
2050 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2051 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2052 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2053 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2054 }
2055 
2056 void
2057 ixl_disable_queue(struct i40e_hw *hw, int id)
2058 {
2059 	u32		reg;
2060 
2061 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2062 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2063 }
2064 
2065 void
2066 ixl_handle_empr_reset(struct ixl_pf *pf)
2067 {
2068 	struct ixl_vsi	*vsi = &pf->vsi;
2069 	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
2070 
2071 	ixl_prepare_for_reset(pf, is_up);
2072 	/*
2073 	 * i40e_pf_reset checks the type of reset and acts
2074 	 * accordingly. If an EMP or Core reset was already
2075 	 * performed, a PF reset is not necessary and may
2076 	 * sometimes fail.
2077 	 */
2078 	ixl_pf_reset(pf);
2079 
2080 	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2081 	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2082 		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2083 		device_printf(pf->dev,
2084 		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2085 		pf->link_up = FALSE;
2086 		ixl_update_link_status(pf);
2087 	}
2088 
2089 	ixl_rebuild_hw_structs_after_reset(pf, is_up);
2090 
2091 	ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
2092 }
2093 
2094 void
2095 ixl_update_stats_counters(struct ixl_pf *pf)
2096 {
2097 	struct i40e_hw	*hw = &pf->hw;
2098 	struct ixl_vsi	*vsi = &pf->vsi;
2099 	struct ixl_vf	*vf;
2100 	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2101 
2102 	struct i40e_hw_port_stats *nsd = &pf->stats;
2103 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2104 
2105 	/* Update hw stats */
2106 	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2107 			   pf->stat_offsets_loaded,
2108 			   &osd->crc_errors, &nsd->crc_errors);
2109 	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2110 			   pf->stat_offsets_loaded,
2111 			   &osd->illegal_bytes, &nsd->illegal_bytes);
2112 	ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port),
2113 			   pf->stat_offsets_loaded,
2114 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2115 	ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port),
2116 			   pf->stat_offsets_loaded,
2117 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2118 	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2119 			   pf->stat_offsets_loaded,
2120 			   &osd->eth.rx_discards,
2121 			   &nsd->eth.rx_discards);
2122 	ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port),
2123 			   pf->stat_offsets_loaded,
2124 			   &osd->eth.rx_unicast,
2125 			   &nsd->eth.rx_unicast);
2126 	ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port),
2127 			   pf->stat_offsets_loaded,
2128 			   &osd->eth.tx_unicast,
2129 			   &nsd->eth.tx_unicast);
2130 	ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port),
2131 			   pf->stat_offsets_loaded,
2132 			   &osd->eth.rx_multicast,
2133 			   &nsd->eth.rx_multicast);
2134 	ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port),
2135 			   pf->stat_offsets_loaded,
2136 			   &osd->eth.tx_multicast,
2137 			   &nsd->eth.tx_multicast);
2138 	ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port),
2139 			   pf->stat_offsets_loaded,
2140 			   &osd->eth.rx_broadcast,
2141 			   &nsd->eth.rx_broadcast);
2142 	ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port),
2143 			   pf->stat_offsets_loaded,
2144 			   &osd->eth.tx_broadcast,
2145 			   &nsd->eth.tx_broadcast);
2146 
2147 	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2148 			   pf->stat_offsets_loaded,
2149 			   &osd->tx_dropped_link_down,
2150 			   &nsd->tx_dropped_link_down);
2151 	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2152 			   pf->stat_offsets_loaded,
2153 			   &osd->mac_local_faults,
2154 			   &nsd->mac_local_faults);
2155 	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2156 			   pf->stat_offsets_loaded,
2157 			   &osd->mac_remote_faults,
2158 			   &nsd->mac_remote_faults);
2159 	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2160 			   pf->stat_offsets_loaded,
2161 			   &osd->rx_length_errors,
2162 			   &nsd->rx_length_errors);
2163 
2164 	/* Flow control (LFC) stats */
2165 	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2166 			   pf->stat_offsets_loaded,
2167 			   &osd->link_xon_rx, &nsd->link_xon_rx);
2168 	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2169 			   pf->stat_offsets_loaded,
2170 			   &osd->link_xon_tx, &nsd->link_xon_tx);
2171 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2172 			   pf->stat_offsets_loaded,
2173 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2174 	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2175 			   pf->stat_offsets_loaded,
2176 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2177 
2178 	/*
2179 	 * For watchdog management we need to know if we have been paused
2180 	 * during the last interval, so capture that here.
2181 	 */
2182 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2183 		vsi->shared->isc_pause_frames = 1;
2184 
2185 	/* Packet size stats rx */
2186 	ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
2187 			   pf->stat_offsets_loaded,
2188 			   &osd->rx_size_64, &nsd->rx_size_64);
2189 	ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
2190 			   pf->stat_offsets_loaded,
2191 			   &osd->rx_size_127, &nsd->rx_size_127);
2192 	ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
2193 			   pf->stat_offsets_loaded,
2194 			   &osd->rx_size_255, &nsd->rx_size_255);
2195 	ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
2196 			   pf->stat_offsets_loaded,
2197 			   &osd->rx_size_511, &nsd->rx_size_511);
2198 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
2199 			   pf->stat_offsets_loaded,
2200 			   &osd->rx_size_1023, &nsd->rx_size_1023);
2201 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
2202 			   pf->stat_offsets_loaded,
2203 			   &osd->rx_size_1522, &nsd->rx_size_1522);
2204 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
2205 			   pf->stat_offsets_loaded,
2206 			   &osd->rx_size_big, &nsd->rx_size_big);
2207 
2208 	/* Packet size stats tx */
2209 	ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
2210 			   pf->stat_offsets_loaded,
2211 			   &osd->tx_size_64, &nsd->tx_size_64);
2212 	ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
2213 			   pf->stat_offsets_loaded,
2214 			   &osd->tx_size_127, &nsd->tx_size_127);
2215 	ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
2216 			   pf->stat_offsets_loaded,
2217 			   &osd->tx_size_255, &nsd->tx_size_255);
2218 	ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
2219 			   pf->stat_offsets_loaded,
2220 			   &osd->tx_size_511, &nsd->tx_size_511);
2221 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
2222 			   pf->stat_offsets_loaded,
2223 			   &osd->tx_size_1023, &nsd->tx_size_1023);
2224 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
2225 			   pf->stat_offsets_loaded,
2226 			   &osd->tx_size_1522, &nsd->tx_size_1522);
2227 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
2228 			   pf->stat_offsets_loaded,
2229 			   &osd->tx_size_big, &nsd->tx_size_big);
2230 
2231 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2232 			   pf->stat_offsets_loaded,
2233 			   &osd->rx_undersize, &nsd->rx_undersize);
2234 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2235 			   pf->stat_offsets_loaded,
2236 			   &osd->rx_fragments, &nsd->rx_fragments);
2237 
2238 	u64 rx_roc;
2239 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2240 			   pf->stat_offsets_loaded,
2241 			   &osd->rx_oversize, &rx_roc);
2242 
2243 	/*
2244 	 * Read the RXERR1 register to get the count of packets larger
2245 	 * than RX MAX and include it in the total rx_oversize count.
2246 	 *
2247 	 * BIT(7) must also be added to the hw->pf_id value when indexing
2248 	 * the I40E_GL_RXERR1 register, as indexes 0..127 are for VFs when
2249 	 * SR-IOV is enabled. Indexes 128..143 are for PFs.
2250 	 */
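	/*
	 * Illustrative arithmetic for the note above (annotation, not driver
	 * code): BIT(7) is 128, so e.g. for PF id 2 the statistic is read
	 * from register index 2 + 128 = 130, inside the 128..143 PF range.
	 */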
2251 	u64 rx_err1;
2252 	ixl_stat_update64(hw,
2253 			   I40E_GL_RXERR1L(hw->pf_id + BIT(7)),
2254 			   pf->stat_offsets_loaded,
2255 			   &osd->rx_err1,
2256 			   &rx_err1);
2257 
2258 	nsd->rx_oversize = rx_roc + rx_err1;
2259 
2260 	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2261 			   pf->stat_offsets_loaded,
2262 			   &osd->rx_jabber, &nsd->rx_jabber);
2263 	/* EEE */
2264 	i40e_get_phy_lpi_status(hw, nsd);
2265 
2266 	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2267 			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
2268 			  &osd->rx_lpi_count, &nsd->rx_lpi_count);
2269 
2270 	pf->stat_offsets_loaded = true;
2271 	/* End hw stats */
2272 
2273 	/* Update vsi stats */
2274 	ixl_update_vsi_stats(vsi);
2275 
2276 	for (int i = 0; i < pf->num_vfs; i++) {
2277 		vf = &pf->vfs[i];
2278 		if (vf->vf_flags & VF_FLAG_ENABLED)
2279 			ixl_update_eth_stats(&pf->vfs[i].vsi);
2280 	}
2281 }
2282 
2283 /**
2284  * Update VSI-specific ethernet statistics counters.
2285  **/
2286 void
2287 ixl_update_eth_stats(struct ixl_vsi *vsi)
2288 {
2289 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2290 	struct i40e_hw *hw = &pf->hw;
2291 	struct i40e_eth_stats *es;
2292 	struct i40e_eth_stats *oes;
2293 	u16 stat_idx = vsi->info.stat_counter_idx;
2294 
2295 	es = &vsi->eth_stats;
2296 	oes = &vsi->eth_stats_offsets;
2297 
2298 	/* Gather up the stats that the hw collects */
2299 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2300 			   vsi->stat_offsets_loaded,
2301 			   &oes->tx_errors, &es->tx_errors);
2302 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2303 			   vsi->stat_offsets_loaded,
2304 			   &oes->rx_discards, &es->rx_discards);
2305 
2306 	ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx),
2307 			   vsi->stat_offsets_loaded,
2308 			   &oes->rx_bytes, &es->rx_bytes);
2309 	ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx),
2310 			   vsi->stat_offsets_loaded,
2311 			   &oes->rx_unicast, &es->rx_unicast);
2312 	ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx),
2313 			   vsi->stat_offsets_loaded,
2314 			   &oes->rx_multicast, &es->rx_multicast);
2315 	ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx),
2316 			   vsi->stat_offsets_loaded,
2317 			   &oes->rx_broadcast, &es->rx_broadcast);
2318 
2319 	ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx),
2320 			   vsi->stat_offsets_loaded,
2321 			   &oes->tx_bytes, &es->tx_bytes);
2322 	ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx),
2323 			   vsi->stat_offsets_loaded,
2324 			   &oes->tx_unicast, &es->tx_unicast);
2325 	ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx),
2326 			   vsi->stat_offsets_loaded,
2327 			   &oes->tx_multicast, &es->tx_multicast);
2328 	ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx),
2329 			   vsi->stat_offsets_loaded,
2330 			   &oes->tx_broadcast, &es->tx_broadcast);
2331 	vsi->stat_offsets_loaded = true;
2332 }
2333 
2334 void
2335 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2336 {
2337 	struct ixl_pf		*pf;
2338 	struct i40e_eth_stats	*es;
2339 	u64			tx_discards, csum_errs;
2340 
2341 	struct i40e_hw_port_stats *nsd;
2342 
2343 	pf = vsi->back;
2344 	es = &vsi->eth_stats;
2345 	nsd = &pf->stats;
2346 
2347 	ixl_update_eth_stats(vsi);
2348 
2349 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2350 
2351 	csum_errs = 0;
2352 	for (int i = 0; i < vsi->num_rx_queues; i++)
2353 		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2354 	nsd->checksum_error = csum_errs;
2355 
2356 	/* Update ifnet stats */
2357 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2358 	    es->rx_multicast +
2359 	    es->rx_broadcast);
2360 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2361 	    es->tx_multicast +
2362 	    es->tx_broadcast);
2363 	IXL_SET_IBYTES(vsi, es->rx_bytes);
2364 	IXL_SET_OBYTES(vsi, es->tx_bytes);
2365 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2366 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2367 
2368 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2369 	    nsd->checksum_error + nsd->rx_length_errors +
2370 	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2371 	    nsd->rx_jabber);
2372 	IXL_SET_OERRORS(vsi, es->tx_errors);
2373 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2374 	IXL_SET_OQDROPS(vsi, tx_discards);
2375 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2376 	IXL_SET_COLLISIONS(vsi, 0);
2377 }
2378 
2379 /**
2380  * Reset all of the stats for the given pf
2381  **/
2382 void
2383 ixl_pf_reset_stats(struct ixl_pf *pf)
2384 {
2385 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2386 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2387 	pf->stat_offsets_loaded = false;
2388 }
2389 
2390 /**
2391  * Resets all stats of the given vsi
2392  **/
2393 void
2394 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2395 {
2396 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2397 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2398 	vsi->stat_offsets_loaded = false;
2399 }
2400 
2401 /**
2402  * Helper function for reading and updating 48/64 bit stats from the hw
2403  *
2404  * Since the device stats are not reset at PFReset, they likely will not
2405  * be zeroed when the driver starts.  We'll save the first values read
2406  * and use them as offsets to be subtracted from the raw values in order
2407  * to report stats that count from zero.
2408  **/
2409 static void
2410 _ixl_stat_update_helper(struct i40e_hw *hw, u32 reg,
2411 	bool offset_loaded, u64 mask, u64 *offset, u64 *stat)
2412 {
2413 	u64 new_data = rd64(hw, reg);
2414 
2415 	if (!offset_loaded)
2416 		*offset = new_data;
2417 	if (new_data >= *offset)
2418 		*stat = new_data - *offset;
2419 	else
2420 		*stat = (new_data + mask) - *offset + 1;
2421 	*stat &= mask;
2422 }
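
/*
 * Worked example of the wraparound handling above (annotation only, with
 * made-up counter values): for a 48-bit stat the mask is 0xFFFFFFFFFFFF.
 * If *offset was saved as 0xFFFFFFFFFFF0 and the counter wraps around to
 * new_data = 0x10, then new_data < *offset, so
 * *stat = (0x10 + 0xFFFFFFFFFFFF) - 0xFFFFFFFFFFF0 + 1 = 0x20,
 * i.e. 32 events counted since the offset was recorded.
 */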
2423 
2424 /**
2425  * Read and update a 48 bit stat from the hw
2426  **/
2427 void
2428 ixl_stat_update48(struct i40e_hw *hw, u32 reg,
2429 	bool offset_loaded, u64 *offset, u64 *stat)
2430 {
2431 	_ixl_stat_update_helper(hw,
2432 		reg,
2433 		offset_loaded,
2434 		0xFFFFFFFFFFFFULL,
2435 		offset,
2436 		stat);
2437 }
2438 
2439 /**
2440  * ixl_stat_update64 - read and update a 64 bit stat from the chip.
2441  **/
2442 void
2443 ixl_stat_update64(struct i40e_hw *hw, u32 reg,
2444 			       bool offset_loaded, u64 *offset, u64 *stat)
2445 {
2446 	_ixl_stat_update_helper(hw,
2447 		reg,
2448 		offset_loaded,
2449 		0xFFFFFFFFFFFFFFFFULL,
2450 		offset,
2451 		stat);
2452 }
2453 
2454 /**
2455  * Read and update a 32 bit stat from the hw
2456  **/
2457 void
2458 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2459 	bool offset_loaded, u64 *offset, u64 *stat)
2460 {
2461 	u32 new_data;
2462 
2463 	new_data = rd32(hw, reg);
2464 	if (!offset_loaded)
2465 		*offset = new_data;
2466 	if (new_data >= *offset)
2467 		*stat = (u32)(new_data - *offset);
2468 	else
2469 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2470 }
2471 
2472 /**
2473  * Add subset of device sysctls safe to use in recovery mode
2474  */
2475 void
2476 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2477 {
2478 	device_t dev = pf->dev;
2479 
2480 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2481 	struct sysctl_oid_list *ctx_list =
2482 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2483 
2484 	struct sysctl_oid *debug_node;
2485 	struct sysctl_oid_list *debug_list;
2486 
2487 	SYSCTL_ADD_PROC(ctx, ctx_list,
2488 	    OID_AUTO, "fw_version",
2489 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2490 	    ixl_sysctl_show_fw, "A", "Firmware version");
2491 
2492 	/* Add sysctls meant to print debug information, but don't list them
2493 	 * in "sysctl -a" output. */
2494 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2495 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2496 	    "Debug Sysctls");
2497 	debug_list = SYSCTL_CHILDREN(debug_node);
2498 
2499 	SYSCTL_ADD_UINT(ctx, debug_list,
2500 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2501 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2502 
2503 	SYSCTL_ADD_UINT(ctx, debug_list,
2504 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2505 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2506 
2507 	SYSCTL_ADD_PROC(ctx, debug_list,
2508 	    OID_AUTO, "dump_debug_data",
2509 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2510 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2511 
2512 	SYSCTL_ADD_PROC(ctx, debug_list,
2513 	    OID_AUTO, "do_pf_reset",
2514 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2515 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2516 
2517 	SYSCTL_ADD_PROC(ctx, debug_list,
2518 	    OID_AUTO, "do_core_reset",
2519 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2520 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2521 
2522 	SYSCTL_ADD_PROC(ctx, debug_list,
2523 	    OID_AUTO, "do_global_reset",
2524 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2525 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2526 
2527 	SYSCTL_ADD_PROC(ctx, debug_list,
2528 	    OID_AUTO, "queue_interrupt_table",
2529 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2530 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2531 }
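
/*
 * Usage note (annotation, assuming device unit 0): because the "debug"
 * node is created with CTLFLAG_SKIP, it is hidden from "sysctl -a" output
 * and its children must be queried by explicit name, e.g.:
 *
 *	sysctl dev.ixl.0.debug.shared_debug_mask
 */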
2532 
2533 void
2534 ixl_add_device_sysctls(struct ixl_pf *pf)
2535 {
2536 	device_t dev = pf->dev;
2537 	struct i40e_hw *hw = &pf->hw;
2538 
2539 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2540 	struct sysctl_oid_list *ctx_list =
2541 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2542 
2543 	struct sysctl_oid *debug_node;
2544 	struct sysctl_oid_list *debug_list;
2545 
2546 	struct sysctl_oid *fec_node;
2547 	struct sysctl_oid_list *fec_list;
2548 	struct sysctl_oid *eee_node;
2549 	struct sysctl_oid_list *eee_list;
2550 
2551 	/* Set up sysctls */
2552 	SYSCTL_ADD_PROC(ctx, ctx_list,
2553 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2554 	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2555 
2556 	SYSCTL_ADD_PROC(ctx, ctx_list,
2557 	    OID_AUTO, "advertise_speed",
2558 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2559 	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2560 
2561 	SYSCTL_ADD_PROC(ctx, ctx_list,
2562 	    OID_AUTO, "supported_speeds",
2563 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2564 	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2565 
2566 	SYSCTL_ADD_PROC(ctx, ctx_list,
2567 	    OID_AUTO, "current_speed",
2568 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2569 	    ixl_sysctl_current_speed, "A", "Current Port Speed");
2570 
2571 	SYSCTL_ADD_PROC(ctx, ctx_list,
2572 	    OID_AUTO, "fw_version",
2573 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2574 	    ixl_sysctl_show_fw, "A", "Firmware version");
2575 
2576 	SYSCTL_ADD_PROC(ctx, ctx_list,
2577 	    OID_AUTO, "unallocated_queues",
2578 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2579 	    ixl_sysctl_unallocated_queues, "I",
2580 	    "Queues not allocated to a PF or VF");
2581 
2582 	SYSCTL_ADD_PROC(ctx, ctx_list,
2583 	    OID_AUTO, "tx_itr",
2584 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2585 	    ixl_sysctl_pf_tx_itr, "I",
2586 	    "Immediately set TX ITR value for all queues");
2587 
2588 	SYSCTL_ADD_PROC(ctx, ctx_list,
2589 	    OID_AUTO, "rx_itr",
2590 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2591 	    ixl_sysctl_pf_rx_itr, "I",
2592 	    "Immediately set RX ITR value for all queues");
2593 
2594 	SYSCTL_ADD_INT(ctx, ctx_list,
2595 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2596 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2597 
2598 	SYSCTL_ADD_INT(ctx, ctx_list,
2599 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2600 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2601 
2602 	/* Add FEC sysctls for 25G adapters */
2603 	if (i40e_is_25G_device(hw->device_id)) {
2604 		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2605 		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2606 		    "FEC Sysctls");
2607 		fec_list = SYSCTL_CHILDREN(fec_node);
2608 
2609 		SYSCTL_ADD_PROC(ctx, fec_list,
2610 		    OID_AUTO, "fc_ability",
2611 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2612 		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2613 
2614 		SYSCTL_ADD_PROC(ctx, fec_list,
2615 		    OID_AUTO, "rs_ability",
2616 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2617 		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2618 
2619 		SYSCTL_ADD_PROC(ctx, fec_list,
2620 		    OID_AUTO, "fc_requested",
2621 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2622 		    ixl_sysctl_fec_fc_request, "I",
2623 		    "FC FEC mode requested on link");
2624 
2625 		SYSCTL_ADD_PROC(ctx, fec_list,
2626 		    OID_AUTO, "rs_requested",
2627 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2628 		    ixl_sysctl_fec_rs_request, "I",
2629 		    "RS FEC mode requested on link");
2630 
2631 		SYSCTL_ADD_PROC(ctx, fec_list,
2632 		    OID_AUTO, "auto_fec_enabled",
2633 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2634 		    ixl_sysctl_fec_auto_enable, "I",
2635 		    "Let FW decide FEC ability/request modes");
2636 	}
2637 
2638 	SYSCTL_ADD_PROC(ctx, ctx_list,
2639 	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2640 	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2641 
2642 	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2643 	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2644 	    "Energy Efficient Ethernet (EEE) Sysctls");
2645 	eee_list = SYSCTL_CHILDREN(eee_node);
2646 
2647 	SYSCTL_ADD_PROC(ctx, eee_list,
2648 	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2649 	    pf, 0, ixl_sysctl_eee_enable, "I",
2650 	    "Enable Energy Efficient Ethernet (EEE)");
2651 
2652 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2653 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2654 	    "TX LPI status");
2655 
2656 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2657 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2658 	    "RX LPI status");
2659 
2660 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2661 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2662 	    "TX LPI count");
2663 
2664 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2665 	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2666 	    "RX LPI count");
2667 
2668 	SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2669 	    "link_active_on_if_down",
2670 	    CTLTYPE_INT | CTLFLAG_RWTUN,
2671 	    pf, 0, ixl_sysctl_set_link_active, "I",
2672 	    IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2673 
2674 	/* Add sysctls meant to print debug information, but don't list them
2675 	 * in "sysctl -a" output. */
2676 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2677 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2678 	    "Debug Sysctls");
2679 	debug_list = SYSCTL_CHILDREN(debug_node);
2680 
2681 	SYSCTL_ADD_UINT(ctx, debug_list,
2682 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2683 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2684 
2685 	SYSCTL_ADD_UINT(ctx, debug_list,
2686 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2687 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2688 
2689 	SYSCTL_ADD_PROC(ctx, debug_list,
2690 	    OID_AUTO, "link_status",
2691 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2692 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2693 
2694 	SYSCTL_ADD_PROC(ctx, debug_list,
2695 	    OID_AUTO, "phy_abilities_init",
2696 	    CTLTYPE_STRING | CTLFLAG_RD,
2697 	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2698 
2699 	SYSCTL_ADD_PROC(ctx, debug_list,
2700 	    OID_AUTO, "phy_abilities",
2701 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2702 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2703 
2704 	SYSCTL_ADD_PROC(ctx, debug_list,
2705 	    OID_AUTO, "filter_list",
2706 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2707 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2708 
2709 	SYSCTL_ADD_PROC(ctx, debug_list,
2710 	    OID_AUTO, "hw_res_alloc",
2711 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2712 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2713 
2714 	SYSCTL_ADD_PROC(ctx, debug_list,
2715 	    OID_AUTO, "switch_config",
2716 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2717 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2718 
2719 	SYSCTL_ADD_PROC(ctx, debug_list,
2720 	    OID_AUTO, "switch_vlans",
2721 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2722 	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2723 
2724 	SYSCTL_ADD_PROC(ctx, debug_list,
2725 	    OID_AUTO, "rss_key",
2726 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2727 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2728 
2729 	SYSCTL_ADD_PROC(ctx, debug_list,
2730 	    OID_AUTO, "rss_lut",
2731 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2732 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2733 
2734 	SYSCTL_ADD_PROC(ctx, debug_list,
2735 	    OID_AUTO, "rss_hena",
2736 	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2737 	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2738 
2739 	SYSCTL_ADD_PROC(ctx, debug_list,
2740 	    OID_AUTO, "disable_fw_link_management",
2741 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2742 	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2743 
2744 	SYSCTL_ADD_PROC(ctx, debug_list,
2745 	    OID_AUTO, "dump_debug_data",
2746 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2747 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2748 
2749 	SYSCTL_ADD_PROC(ctx, debug_list,
2750 	    OID_AUTO, "do_pf_reset",
2751 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2752 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2753 
2754 	SYSCTL_ADD_PROC(ctx, debug_list,
2755 	    OID_AUTO, "do_core_reset",
2756 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2757 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2758 
2759 	SYSCTL_ADD_PROC(ctx, debug_list,
2760 	    OID_AUTO, "do_global_reset",
2761 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2762 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2763 
2764 	SYSCTL_ADD_PROC(ctx, debug_list,
2765 	    OID_AUTO, "queue_interrupt_table",
2766 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2767 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2768 
2769 	SYSCTL_ADD_PROC(ctx, debug_list,
2770 	    OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
2771 	    pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");
2772 
2773 	if (pf->has_i2c) {
2774 		SYSCTL_ADD_PROC(ctx, debug_list,
2775 		    OID_AUTO, "read_i2c_byte",
2776 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2777 		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2778 
2779 		SYSCTL_ADD_PROC(ctx, debug_list,
2780 		    OID_AUTO, "write_i2c_byte",
2781 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2782 		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2783 
2784 		SYSCTL_ADD_PROC(ctx, debug_list,
2785 		    OID_AUTO, "read_i2c_diag_data",
2786 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2787 		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2788 	}
2789 }
2790 
2791 /*
2792  * Primarily for finding out, at runtime, how many queues can be
2793  * assigned to VFs.
2794  */
2795 static int
2796 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2797 {
2798 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2799 	int queues;
2800 
2801 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2802 
2803 	return sysctl_handle_int(oidp, NULL, queues, req);
2804 }
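
/*
 * Example query (annotation, assuming device unit 0):
 *
 *	sysctl dev.ixl.0.unallocated_queues
 *
 * The returned value is the number of hardware queues the queue manager
 * still has free, i.e. available for assignment to VFs.
 */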
2805 
2806 static const char *
2807 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2808 {
2809 	const char * link_speed_str[] = {
2810 		"Unknown",
2811 		"100 Mbps",
2812 		"1 Gbps",
2813 		"10 Gbps",
2814 		"40 Gbps",
2815 		"20 Gbps",
2816 		"25 Gbps",
2817 		"2.5 Gbps",
2818 		"5 Gbps"
2819 	};
2820 	int index;
2821 
2822 	switch (link_speed) {
2823 	case I40E_LINK_SPEED_100MB:
2824 		index = 1;
2825 		break;
2826 	case I40E_LINK_SPEED_1GB:
2827 		index = 2;
2828 		break;
2829 	case I40E_LINK_SPEED_10GB:
2830 		index = 3;
2831 		break;
2832 	case I40E_LINK_SPEED_40GB:
2833 		index = 4;
2834 		break;
2835 	case I40E_LINK_SPEED_20GB:
2836 		index = 5;
2837 		break;
2838 	case I40E_LINK_SPEED_25GB:
2839 		index = 6;
2840 		break;
2841 	case I40E_LINK_SPEED_2_5GB:
2842 		index = 7;
2843 		break;
2844 	case I40E_LINK_SPEED_5GB:
2845 		index = 8;
2846 		break;
2847 	case I40E_LINK_SPEED_UNKNOWN:
2848 	default:
2849 		index = 0;
2850 		break;
2851 	}
2852 
2853 	return (link_speed_str[index]);
2854 }
2855 
2856 int
2857 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2858 {
2859 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2860 	struct i40e_hw *hw = &pf->hw;
2861 	int error = 0;
2862 
2863 	ixl_update_link_status(pf);
2864 
2865 	error = sysctl_handle_string(oidp,
2866 	    __DECONST(void *,
2867 		ixl_link_speed_string(hw->phy.link_info.link_speed)),
2868 	    8, req);
2869 
2870 	return (error);
2871 }
2872 
2873 /*
2874  * Converts an 8-bit speed bitmap between the sysctl flag format and
2875  * the Admin Queue flag format.
2876  */
2877 static u8
2878 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2879 {
2880 #define SPEED_MAP_SIZE 8
2881 	static u16 speedmap[SPEED_MAP_SIZE] = {
2882 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2883 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2884 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2885 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2886 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2887 		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2888 		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2889 		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2890 	};
2891 	u8 retval = 0;
2892 
2893 	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2894 		if (to_aq)
2895 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2896 		else
2897 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2898 	}
2899 
2900 	return (retval);
2901 }
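
/*
 * Worked example of the mapping above (annotation only): each speedmap
 * entry packs the Admin Queue flag in its low byte and the sysctl flag in
 * its high byte. With to_aq = true, a sysctl value of 0x4 (10G) matches
 * the (I40E_LINK_SPEED_10GB | (0x4 << 8)) entry and yields
 * I40E_LINK_SPEED_10GB; with to_aq = false the same entry translates
 * I40E_LINK_SPEED_10GB back to 0x4.
 */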
2902 
2903 int
2904 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2905 {
2906 	struct i40e_hw *hw = &pf->hw;
2907 	device_t dev = pf->dev;
2908 	struct i40e_aq_get_phy_abilities_resp abilities;
2909 	struct i40e_aq_set_phy_config config;
2910 	enum i40e_status_code aq_error = 0;
2911 
2912 	/* Get current capability information */
2913 	aq_error = i40e_aq_get_phy_capabilities(hw,
2914 	    FALSE, FALSE, &abilities, NULL);
2915 	if (aq_error) {
2916 		device_printf(dev,
2917 		    "%s: Error getting phy capabilities %d,"
2918 		    " aq error: %d\n", __func__, aq_error,
2919 		    hw->aq.asq_last_status);
2920 		return (EIO);
2921 	}
2922 
2923 	/* Prepare new config */
2924 	bzero(&config, sizeof(config));
2925 	if (from_aq)
2926 		config.link_speed = speeds;
2927 	else
2928 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2929 	config.phy_type = abilities.phy_type;
2930 	config.phy_type_ext = abilities.phy_type_ext;
2931 	config.abilities = abilities.abilities
2932 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2933 	config.eee_capability = abilities.eee_capability;
2934 	config.eeer = abilities.eeer_val;
2935 	config.low_power_ctrl = abilities.d3_lpan;
2936 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2937 	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2938 
2939 	/* Do aq command & restart link */
2940 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2941 	if (aq_error) {
2942 		device_printf(dev,
2943 		    "%s: Error setting new phy config %d,"
2944 		    " aq error: %d\n", __func__, aq_error,
2945 		    hw->aq.asq_last_status);
2946 		return (EIO);
2947 	}
2948 
2949 	return (0);
2950 }
2951 
2952 /*
2953 ** Supported link speeds
2954 **	Flags:
2955 **	 0x1 - 100 Mb
2956 **	 0x2 - 1G
2957 **	 0x4 - 10G
2958 **	 0x8 - 20G
2959 **	0x10 - 25G
2960 **	0x20 - 40G
2961 **	0x40 - 2.5G
2962 **	0x80 - 5G
2963 */
2964 static int
2965 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2966 {
2967 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2968 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2969 
2970 	return sysctl_handle_int(oidp, NULL, supported, req);
2971 }
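
/*
 * Example decode (annotation, hypothetical value): a supported_speeds
 * reading of 0x26 is 0x2 | 0x4 | 0x20, i.e. the adapter supports 1G, 10G
 * and 40G links per the flag table above.
 */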
2972 
2973 /*
2974 ** Control link advertise speed:
2975 **	Flags:
2976 **	 0x1 - advertise 100 Mb
2977 **	 0x2 - advertise 1G
2978 **	 0x4 - advertise 10G
2979 **	 0x8 - advertise 20G
2980 **	0x10 - advertise 25G
2981 **	0x20 - advertise 40G
2982 **	0x40 - advertise 2.5G
2983 **	0x80 - advertise 5G
2984 **
2985 **	Set to 0 to disable link
2986 */
2987 int
2988 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2989 {
2990 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2991 	device_t dev = pf->dev;
2992 	u8 converted_speeds;
2993 	int requested_ls = 0;
2994 	int error = 0;
2995 
2996 	/* Read in new mode */
2997 	requested_ls = pf->advertised_speed;
2998 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2999 	if ((error) || (req->newptr == NULL))
3000 		return (error);
3001 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
3002 		device_printf(dev, "Interface is currently in FW recovery mode. "
3003 				"Setting advertise speed not supported\n");
3004 		return (EINVAL);
3005 	}
3006 
3007 	/* Error out if bits outside of possible flag range are set */
3008 	if ((requested_ls & ~((u8)0xFF)) != 0) {
3009 		device_printf(dev, "Input advertised speed out of range; "
3010 		    "valid flags are: 0x%02x\n",
3011 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3012 		return (EINVAL);
3013 	}
3014 
3015 	/* Check if adapter supports input value */
3016 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3017 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3018 		device_printf(dev, "Invalid advertised speed; "
3019 		    "valid flags are: 0x%02x\n",
3020 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3021 		return (EINVAL);
3022 	}
3023 
3024 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3025 	if (error)
3026 		return (error);
3027 
3028 	pf->advertised_speed = requested_ls;
3029 	ixl_update_link_status(pf);
3030 	return (0);
3031 }
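
/*
 * Example usage (annotation, assuming device unit 0): advertise only 10G,
 * then disable the link entirely:
 *
 *	sysctl dev.ixl.0.advertise_speed=0x4
 *	sysctl dev.ixl.0.advertise_speed=0
 */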
3032 
3033 /*
3034  * Input: bitmap of enum i40e_aq_link_speed
3035  */
3036 u64
3037 ixl_max_aq_speed_to_value(u8 link_speeds)
3038 {
3039 	if (link_speeds & I40E_LINK_SPEED_40GB)
3040 		return IF_Gbps(40);
3041 	if (link_speeds & I40E_LINK_SPEED_25GB)
3042 		return IF_Gbps(25);
3043 	if (link_speeds & I40E_LINK_SPEED_20GB)
3044 		return IF_Gbps(20);
3045 	if (link_speeds & I40E_LINK_SPEED_10GB)
3046 		return IF_Gbps(10);
3047 	if (link_speeds & I40E_LINK_SPEED_5GB)
3048 		return IF_Gbps(5);
3049 	if (link_speeds & I40E_LINK_SPEED_2_5GB)
3050 		return IF_Mbps(2500);
3051 	if (link_speeds & I40E_LINK_SPEED_1GB)
3052 		return IF_Gbps(1);
3053 	if (link_speeds & I40E_LINK_SPEED_100MB)
3054 		return IF_Mbps(100);
3055 	else
3056 		/* Minimum supported link speed */
3057 		return IF_Mbps(100);
3058 }
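
/*
 * Example (annotation only): for a bitmap advertising both 10G and 25G
 * (I40E_LINK_SPEED_25GB | I40E_LINK_SPEED_10GB), the 25G test matches
 * first and the function returns IF_Gbps(25).
 */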
3059 
3060 /*
3061 ** Get the width and transaction speed of
3062 ** the bus this adapter is plugged into.
3063 */
3064 void
3065 ixl_get_bus_info(struct ixl_pf *pf)
3066 {
3067 	struct i40e_hw *hw = &pf->hw;
3068 	device_t dev = pf->dev;
3069 	u16 link;
3070 	u32 offset, num_ports;
3071 	u64 max_speed;
3072 
3073 	/* Some devices don't use PCIE */
3074 	if (hw->mac.type == I40E_MAC_X722)
3075 		return;
3076 
3077 	/* Read PCI Express Capabilities Link Status Register */
3078 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3079 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3080 
3081 	/* Fill out hw struct with PCIE info */
3082 	i40e_set_pci_config_data(hw, link);
3083 
3084 	/* Use info to print out bandwidth messages */
3085         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3086             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3087             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3088             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3089             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3090             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3091             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3092             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3093             ("Unknown"));
3094 
3095 	/*
3096 	 * If adapter is in slot with maximum supported speed,
3097 	 * no warning message needs to be printed out.
3098 	 */
3099 	if (hw->bus.speed >= i40e_bus_speed_8000
3100 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3101 		return;
3102 
3103 	num_ports = bitcount32(hw->func_caps.valid_functions);
3104 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3105 
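	/*
	 * Illustrative check (annotation, hypothetical values): a 4-port
	 * 10G adapter needs 4 * 10000 = 40000 Mb/s, while a PCIe 2.0 x4
	 * link provides roughly 5000 * 4 = 20000 (bus speed enum value
	 * times lane count), so the warning below would be printed.
	 */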
3106 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3107                 device_printf(dev, "PCI-Express bandwidth available"
3108                     " for this device may be insufficient for"
3109                     " optimal performance.\n");
3110                 device_printf(dev, "Please move the device to a different"
3111 		    " PCI-e link with more lanes and/or higher"
3112 		    " transfer rate.\n");
3113         }
3114 }
3115 
3116 static int
3117 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3118 {
3119 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3120 	struct i40e_hw	*hw = &pf->hw;
3121 	struct sbuf	*sbuf;
3122 
3123 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3124 	ixl_nvm_version_str(hw, sbuf);
3125 	sbuf_finish(sbuf);
3126 	sbuf_delete(sbuf);
3127 
3128 	return (0);
3129 }
3130 
3131 void
3132 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3133 {
3134 	u8 nvma_ptr = nvma->config & 0xFF;
3135 	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3136 	const char * cmd_str;
3137 
3138 	switch (nvma->command) {
3139 	case I40E_NVM_READ:
3140 		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3141 		    nvma->offset == 0 && nvma->data_size == 1) {
3142 			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3143 			return;
3144 		}
3145 		cmd_str = "READ ";
3146 		break;
3147 	case I40E_NVM_WRITE:
3148 		cmd_str = "WRITE";
3149 		break;
3150 	default:
3151 		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3152 		return;
3153 	}
3154 	device_printf(dev,
3155 	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3156 	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3157 }
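
/*
 * Example decode (annotation, hypothetical request): for an I40E_NVM_READ
 * command with nvma->config == 0x30A, the module pointer is 0x0A
 * (config & 0xFF) and the flags are 0x3 ((config & 0xF00) >> 8).
 */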
3158 
3159 int
3160 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3161 {
3162 	struct i40e_hw *hw = &pf->hw;
3163 	struct i40e_nvm_access *nvma;
3164 	device_t dev = pf->dev;
3165 	enum i40e_status_code status = 0;
3166 	size_t nvma_size, ifd_len, exp_len;
3167 	int err, perrno;
3168 
3169 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3170 
3171 	/* Sanity checks */
3172 	nvma_size = sizeof(struct i40e_nvm_access);
3173 	ifd_len = ifd->ifd_len;
3174 
3175 	if (ifd_len < nvma_size ||
3176 	    ifd->ifd_data == NULL) {
3177 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3178 		    __func__);
3179 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3180 		    __func__, ifd_len, nvma_size);
3181 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3182 		    ifd->ifd_data);
3183 		return (EINVAL);
3184 	}
3185 
3186 	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3187 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3188 	if (err) {
3189 		device_printf(dev, "%s: Cannot get request from user space\n",
3190 		    __func__);
3191 		free(nvma, M_IXL);
3192 		return (err);
3193 	}
3194 
3195 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3196 		ixl_print_nvm_cmd(dev, nvma);
3197 
3198 	if (IXL_PF_IS_RESETTING(pf)) {
3199 		int count = 0;
3200 		while (count++ < 100) {
3201 			i40e_msec_delay(100);
3202 			if (!(IXL_PF_IS_RESETTING(pf)))
3203 				break;
3204 		}
3205 	}
3206 
3207 	if (IXL_PF_IS_RESETTING(pf)) {
3208 		device_printf(dev,
3209 		    "%s: timeout waiting for EMP reset to finish\n",
3210 		    __func__);
3211 		free(nvma, M_IXL);
3212 		return (-EBUSY);
3213 	}
3214 
3215 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3216 		device_printf(dev,
3217 		    "%s: invalid request, data size not in supported range\n",
3218 		    __func__);
3219 		free(nvma, M_IXL);
3220 		return (EINVAL);
3221 	}
3222 
3223 	/*
3224 	 * Older versions of the NVM update tool don't set ifd_len to the size
3225 	 * of the entire buffer passed to the ioctl. Check the data_size field
3226 	 * in the contained i40e_nvm_access struct and ensure everything is
3227 	 * copied in from userspace.
3228 	 */
3229 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
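	/*
	 * Example (annotation, hypothetical sizes): if data_size is 16,
	 * exp_len is sizeof(struct i40e_nvm_access) + 15, since the
	 * structure's trailing one-byte data member ("One byte is kept in
	 * struct" above) already accounts for the first data byte.
	 */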
3230 
3231 	if (ifd_len < exp_len) {
3232 		ifd_len = exp_len;
3233 		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3234 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3235 		if (err) {
3236 			device_printf(dev, "%s: Cannot get request from user space\n",
3237 					__func__);
3238 			free(nvma, M_IXL);
3239 			return (err);
3240 		}
3241 	}
3242 
3243 	// TODO: Might need a different lock here
3244 	// IXL_PF_LOCK(pf);
3245 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3246 	// IXL_PF_UNLOCK(pf);
3247 
3248 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3249 	free(nvma, M_IXL);
3250 	if (err) {
3251 		device_printf(dev, "%s: Cannot return data to user space\n",
3252 				__func__);
3253 		return (err);
3254 	}
3255 
3256 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3257 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3258 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3259 		    i40e_stat_str(hw, status), perrno);
3260 
3261 	/*
3262 	 * -EPERM is actually ERESTART, which the kernel interprets as a request
3263 	 * to run this ioctl again, so return -EACCES in place of -EPERM.
3264 	 */
3265 	if (perrno == -EPERM)
3266 		return (-EACCES);
3267 	else
3268 		return (perrno);
3269 }
3270 
3271 int
3272 ixl_find_i2c_interface(struct ixl_pf *pf)
3273 {
3274 	struct i40e_hw *hw = &pf->hw;
3275 	bool i2c_en, port_matched;
3276 	u32 reg;
3277 
3278 	for (int i = 0; i < 4; i++) {
3279 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3280 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3281 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3282 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3283 		    & BIT(hw->port);
3284 		if (i2c_en && port_matched)
3285 			return (i);
3286 	}
3287 
3288 	return (-1);
3289 }
3290 
3291 void
3292 ixl_set_link(struct ixl_pf *pf, bool enable)
3293 {
3294 	struct i40e_hw *hw = &pf->hw;
3295 	device_t dev = pf->dev;
3296 	struct i40e_aq_get_phy_abilities_resp abilities;
3297 	struct i40e_aq_set_phy_config config;
3298 	enum i40e_status_code aq_error = 0;
3299 	u32 phy_type, phy_type_ext;
3300 
3301 	/* Get initial capability information */
3302 	aq_error = i40e_aq_get_phy_capabilities(hw,
3303 	    FALSE, TRUE, &abilities, NULL);
3304 	if (aq_error) {
3305 		device_printf(dev,
3306 		    "%s: Error getting phy capabilities %d,"
3307 		    " aq error: %d\n", __func__, aq_error,
3308 		    hw->aq.asq_last_status);
3309 		return;
3310 	}
3311 
3312 	phy_type = abilities.phy_type;
3313 	phy_type_ext = abilities.phy_type_ext;
3314 
3315 	/* Get current capability information */
3316 	aq_error = i40e_aq_get_phy_capabilities(hw,
3317 	    FALSE, FALSE, &abilities, NULL);
3318 	if (aq_error) {
3319 		device_printf(dev,
3320 		    "%s: Error getting phy capabilities %d,"
3321 		    " aq error: %d\n", __func__, aq_error,
3322 		    hw->aq.asq_last_status);
3323 		return;
3324 	}
3325 
3326 	/* Prepare new config */
3327 	memset(&config, 0, sizeof(config));
3328 	config.link_speed = abilities.link_speed;
3329 	config.abilities = abilities.abilities;
3330 	config.eee_capability = abilities.eee_capability;
3331 	config.eeer = abilities.eeer_val;
3332 	config.low_power_ctrl = abilities.d3_lpan;
3333 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3334 	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
3335 	config.phy_type = 0;
3336 	config.phy_type_ext = 0;
3337 
3338 	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3339 			I40E_AQ_PHY_FLAG_PAUSE_RX);
3340 
3341 	switch (pf->fc) {
3342 	case I40E_FC_FULL:
3343 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3344 			I40E_AQ_PHY_FLAG_PAUSE_RX;
3345 		break;
3346 	case I40E_FC_RX_PAUSE:
3347 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3348 		break;
3349 	case I40E_FC_TX_PAUSE:
3350 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3351 		break;
3352 	default:
3353 		break;
3354 	}
3355 
3356 	if (enable) {
3357 		config.phy_type = phy_type;
3358 		config.phy_type_ext = phy_type_ext;
3359 
3360 	}
3361 
3362 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3363 	if (aq_error) {
3364 		device_printf(dev,
3365 		    "%s: Error setting new phy config %d,"
3366 		    " aq error: %d\n", __func__, aq_error,
3367 		    hw->aq.asq_last_status);
3368 		return;
3369 	}
3370 
3371 	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3372 	if (aq_error) {
3373 		device_printf(dev,
3374 		    "%s: Error set link config %d,"
3375 		    " aq error: %d\n", __func__, aq_error,
3376 		    hw->aq.asq_last_status);
3377 		return;
3378 	}
3379 }
3380 
3381 static char *
3382 ixl_phy_type_string(u32 bit_pos, bool ext)
3383 {
3384 	static char * phy_types_str[32] = {
3385 		"SGMII",
3386 		"1000BASE-KX",
3387 		"10GBASE-KX4",
3388 		"10GBASE-KR",
3389 		"40GBASE-KR4",
3390 		"XAUI",
3391 		"XFI",
3392 		"SFI",
3393 		"XLAUI",
3394 		"XLPPI",
3395 		"40GBASE-CR4",
3396 		"10GBASE-CR1",
3397 		"SFP+ Active DA",
3398 		"QSFP+ Active DA",
3399 		"Reserved (14)",
3400 		"Reserved (15)",
3401 		"Reserved (16)",
3402 		"100BASE-TX",
3403 		"1000BASE-T",
3404 		"10GBASE-T",
3405 		"10GBASE-SR",
3406 		"10GBASE-LR",
3407 		"10GBASE-SFP+Cu",
3408 		"10GBASE-CR1",
3409 		"40GBASE-CR4",
3410 		"40GBASE-SR4",
3411 		"40GBASE-LR4",
3412 		"1000BASE-SX",
3413 		"1000BASE-LX",
3414 		"1000BASE-T Optical",
3415 		"20GBASE-KR2",
3416 		"Reserved (31)"
3417 	};
3418 	static char * ext_phy_types_str[8] = {
3419 		"25GBASE-KR",
3420 		"25GBASE-CR",
3421 		"25GBASE-SR",
3422 		"25GBASE-LR",
3423 		"25GBASE-AOC",
3424 		"25GBASE-ACC",
3425 		"2.5GBASE-T",
3426 		"5GBASE-T"
3427 	};
3428 
3429 	if (ext && bit_pos > 7) return "Invalid_Ext";
3430 	if (bit_pos > 31) return "Invalid";
3431 
3432 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3433 }
3434 
3435 	/* TODO: ERJ: I don't think this is necessary anymore. */
3436 int
3437 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3438 {
3439 	device_t dev = pf->dev;
3440 	struct i40e_hw *hw = &pf->hw;
3441 	struct i40e_aq_desc desc;
3442 	enum i40e_status_code status;
3443 
3444 	struct i40e_aqc_get_link_status *aq_link_status =
3445 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3446 
3447 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3448 	aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3449 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3450 	if (status) {
3451 		device_printf(dev,
3452 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3453 		    __func__, i40e_stat_str(hw, status),
3454 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3455 		return (EIO);
3456 	}
3457 
3458 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3459 	return (0);
3460 }
3461 
3462 static char *
3463 ixl_phy_type_string_ls(u8 val)
3464 {
3465 	if (val >= 0x1F)
3466 		return ixl_phy_type_string(val - 0x1F, true);
3467 	else
3468 		return ixl_phy_type_string(val, false);
3469 }
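
/*
 * For example, a link-status PHY type value of 0x20 decodes as
 * extended bit position 0x20 - 0x1F = 1 ("25GBASE-CR"), while 0x03
 * decodes directly as "10GBASE-KR".
 */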
3470 
3471 static int
3472 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3473 {
3474 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3475 	device_t dev = pf->dev;
3476 	struct sbuf *buf;
3477 	int error = 0;
3478 
3479 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3480 	if (!buf) {
3481 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3482 		return (ENOMEM);
3483 	}
3484 
3485 	struct i40e_aqc_get_link_status link_status;
3486 	error = ixl_aq_get_link_status(pf, &link_status);
3487 	if (error) {
3488 		sbuf_delete(buf);
3489 		return (error);
3490 	}
3491 
3492 	sbuf_printf(buf, "\n"
3493 	    "PHY Type : 0x%02x<%s>\n"
3494 	    "Speed    : 0x%02x\n"
3495 	    "Link info: 0x%02x\n"
3496 	    "AN info  : 0x%02x\n"
3497 	    "Ext info : 0x%02x\n"
3498 	    "Loopback : 0x%02x\n"
3499 	    "Max Frame: %d\n"
3500 	    "Config   : 0x%02x\n"
3501 	    "Power    : 0x%02x",
3502 	    link_status.phy_type,
3503 	    ixl_phy_type_string_ls(link_status.phy_type),
3504 	    link_status.link_speed,
3505 	    link_status.link_info,
3506 	    link_status.an_info,
3507 	    link_status.ext_info,
3508 	    link_status.loopback,
3509 	    link_status.max_frame_size,
3510 	    link_status.config,
3511 	    link_status.power_desc);
3512 
3513 	error = sbuf_finish(buf);
3514 	if (error)
3515 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3516 
3517 	sbuf_delete(buf);
3518 	return (error);
3519 }
3520 
3521 static int
3522 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3523 {
3524 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3525 	struct i40e_hw *hw = &pf->hw;
3526 	device_t dev = pf->dev;
3527 	enum i40e_status_code status;
3528 	struct i40e_aq_get_phy_abilities_resp abilities;
3529 	struct sbuf *buf;
3530 	int error = 0;
3531 
3532 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3533 	if (!buf) {
3534 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3535 		return (ENOMEM);
3536 	}
3537 
3538 	status = i40e_aq_get_phy_capabilities(hw,
3539 	    FALSE, arg2 != 0, &abilities, NULL);
3540 	if (status) {
3541 		device_printf(dev,
3542 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3543 		    __func__, i40e_stat_str(hw, status),
3544 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3545 		sbuf_delete(buf);
3546 		return (EIO);
3547 	}
3548 
3549 	sbuf_printf(buf, "\n"
3550 	    "PHY Type : %08x",
3551 	    abilities.phy_type);
3552 
3553 	if (abilities.phy_type != 0) {
3554 		sbuf_printf(buf, "<");
3555 		for (int i = 0; i < 32; i++)
3556 			if ((1 << i) & abilities.phy_type)
3557 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3558 		sbuf_printf(buf, ">");
3559 	}
3560 
3561 	sbuf_printf(buf, "\nPHY Ext  : %02x",
3562 	    abilities.phy_type_ext);
3563 
3564 	if (abilities.phy_type_ext != 0) {
3565 		sbuf_printf(buf, "<");
3566 		for (int i = 0; i < 8; i++)
3567 			if ((1 << i) & abilities.phy_type_ext)
3568 				sbuf_printf(buf, "%s,",
3569 				    ixl_phy_type_string(i, true));
3570 		sbuf_printf(buf, ">");
3571 	}
3572 
3573 	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3574 	if (abilities.link_speed != 0) {
3575 		u8 link_speed;
3576 		sbuf_printf(buf, " <");
3577 		for (int i = 0; i < 8; i++) {
3578 			link_speed = (1 << i) & abilities.link_speed;
3579 			if (link_speed)
3580 				sbuf_printf(buf, "%s, ",
3581 				    ixl_link_speed_string(link_speed));
3582 		}
3583 		sbuf_printf(buf, ">");
3584 	}
3585 
3586 	sbuf_printf(buf, "\n"
3587 	    "Abilities: %02x\n"
3588 	    "EEE cap  : %04x\n"
3589 	    "EEER reg : %08x\n"
3590 	    "D3 Lpan  : %02x\n"
3591 	    "ID       : %02x %02x %02x %02x\n"
3592 	    "ModType  : %02x %02x %02x\n"
3593 	    "ModType E: %01x\n"
3594 	    "FEC Cfg  : %02x\n"
3595 	    "Ext CC   : %02x",
3596 	    abilities.abilities, abilities.eee_capability,
3597 	    abilities.eeer_val, abilities.d3_lpan,
3598 	    abilities.phy_id[0], abilities.phy_id[1],
3599 	    abilities.phy_id[2], abilities.phy_id[3],
3600 	    abilities.module_type[0], abilities.module_type[1],
3601 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3602 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3603 	    abilities.ext_comp_code);
3604 
3605 	error = sbuf_finish(buf);
3606 	if (error)
3607 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3608 
3609 	sbuf_delete(buf);
3610 	return (error);
3611 }
3612 
3613 static int
3614 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS)
3615 {
3616 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3617 	struct i40e_hw *hw = &pf->hw;
3618 	device_t dev = pf->dev;
3619 	struct sbuf *buf;
3620 	int error = 0;
3621 
3622 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3623 	if (buf == NULL) {
3624 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3625 		return (ENOMEM);
3626 	}
3627 
3628 	if (hw->mac.type == I40E_MAC_X722) {
3629 		sbuf_printf(buf, "\n"
3630 		    "PCS Link Control Register:                          unavailable\n"
3631 		    "PCS Link Status 1:                                  unavailable\n"
3632 		    "PCS Link Status 2:                                  unavailable\n"
3633 		    "XGMII FIFO Status:                                  unavailable\n"
3634 		    "Auto-Negotiation (AN) Status:                       unavailable\n"
3635 		    "KR PCS Status:                                      unavailable\n"
3636 		    "KR FEC Status 1 – FEC Correctable Blocks Counter:   unavailable\n"
3637 		    "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable"
3638 		);
3639 	} else {
3640 		sbuf_printf(buf, "\n"
3641 		    "PCS Link Control Register:                          %#010X\n"
3642 		    "PCS Link Status 1:                                  %#010X\n"
3643 		    "PCS Link Status 2:                                  %#010X\n"
3644 		    "XGMII FIFO Status:                                  %#010X\n"
3645 		    "Auto-Negotiation (AN) Status:                       %#010X\n"
3646 		    "KR PCS Status:                                      %#010X\n"
3647 		    "KR FEC Status 1 – FEC Correctable Blocks Counter:   %#010X\n"
3648 		    "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X",
3649 		    rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL),
3650 		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)),
3651 		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2),
3652 		    rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS),
3653 		    rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS),
3654 		    rd32(hw, I40E_PRTMAC_PCS_KR_STATUS),
3655 		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1),
3656 		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2)
3657 		);
3658 	}
3659 
3660 	error = sbuf_finish(buf);
3661 	if (error)
3662 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3663 
3664 	sbuf_delete(buf);
3665 	return (error);
3666 }
3667 
3668 static int
3669 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3670 {
3671 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3672 	struct ixl_vsi *vsi = &pf->vsi;
3673 	struct ixl_mac_filter *f;
3674 	device_t dev = pf->dev;
3675 	int error = 0, ftl_len = 0, ftl_counter = 0;
3676 
3677 	struct sbuf *buf;
3678 
3679 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3680 	if (!buf) {
3681 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3682 		return (ENOMEM);
3683 	}
3684 
3685 	sbuf_printf(buf, "\n");
3686 
3687 	/* Print MAC filters */
3688 	sbuf_printf(buf, "PF Filters:\n");
3689 	LIST_FOREACH(f, &vsi->ftl, ftle)
3690 		ftl_len++;
3691 
3692 	if (ftl_len < 1)
3693 		sbuf_printf(buf, "(none)\n");
3694 	else {
3695 		LIST_FOREACH(f, &vsi->ftl, ftle) {
3696 			sbuf_printf(buf,
3697 			    MAC_FORMAT ", vlan %4d, flags %#06x",
3698 			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3699 			/* don't print '\n' for last entry */
3700 			if (++ftl_counter != ftl_len)
3701 				sbuf_printf(buf, "\n");
3702 		}
3703 	}
3704 
3705 #ifdef PCI_IOV
3706 	/* TODO: Give each VF its own filter list sysctl */
3707 	struct ixl_vf *vf;
3708 	if (pf->num_vfs > 0) {
3709 		sbuf_printf(buf, "\n\n");
3710 		for (int i = 0; i < pf->num_vfs; i++) {
3711 			vf = &pf->vfs[i];
3712 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
3713 				continue;
3714 
3715 			vsi = &vf->vsi;
3716 			ftl_len = 0; ftl_counter = 0;
3717 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3718 			LIST_FOREACH(f, &vsi->ftl, ftle)
3719 				ftl_len++;
3720 
3721 			if (ftl_len < 1)
3722 				sbuf_printf(buf, "(none)\n");
3723 			else {
3724 				LIST_FOREACH(f, &vsi->ftl, ftle) {
3725 					sbuf_printf(buf,
3726 					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
3727 					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3728 				}
3729 			}
3730 		}
3731 	}
3732 #endif
3733 
3734 	error = sbuf_finish(buf);
3735 	if (error)
3736 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3737 	sbuf_delete(buf);
3738 
3739 	return (error);
3740 }
3741 
3742 #define IXL_SW_RES_SIZE 0x14
3743 int
3744 ixl_res_alloc_cmp(const void *a, const void *b)
3745 {
3746 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3747 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3748 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3749 
3750 	return ((int)one->resource_type - (int)two->resource_type);
3751 }
3752 
3753 /*
3754  * Longest string length: 25
3755  */
3756 const char *
3757 ixl_switch_res_type_string(u8 type)
3758 {
3759 	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3760 		"VEB",
3761 		"VSI",
3762 		"Perfect Match MAC address",
3763 		"S-tag",
3764 		"(Reserved)",
3765 		"Multicast hash entry",
3766 		"Unicast hash entry",
3767 		"VLAN",
3768 		"VSI List entry",
3769 		"(Reserved)",
3770 		"VLAN Statistic Pool",
3771 		"Mirror Rule",
3772 		"Queue Set",
3773 		"Inner VLAN Forward filter",
3774 		"(Reserved)",
3775 		"Inner MAC",
3776 		"IP",
3777 		"GRE/VN1 Key",
3778 		"VN2 Key",
3779 		"Tunneling Port"
3780 	};
3781 
3782 	if (type < IXL_SW_RES_SIZE)
3783 		return ixl_switch_res_type_strings[type];
3784 	else
3785 		return "(Reserved)";
3786 }
3787 
3788 static int
3789 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3790 {
3791 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3792 	struct i40e_hw *hw = &pf->hw;
3793 	device_t dev = pf->dev;
3794 	struct sbuf *buf;
3795 	enum i40e_status_code status;
3796 	int error = 0;
3797 
3798 	u8 num_entries;
3799 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3800 
3801 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3802 	if (!buf) {
3803 		device_printf(dev, "Could not allocate sbuf for output.\n");
3804 		return (ENOMEM);
3805 	}
3806 
3807 	bzero(resp, sizeof(resp));
3808 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3809 				resp,
3810 				IXL_SW_RES_SIZE,
3811 				NULL);
3812 	if (status) {
3813 		device_printf(dev,
3814 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3815 		    __func__, i40e_stat_str(hw, status),
3816 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3817 		sbuf_delete(buf);
3818 		return (EIO);
3819 	}
3820 
3821 	/* Sort entries by type for display */
3822 	qsort(resp, num_entries,
3823 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3824 	    &ixl_res_alloc_cmp);
3825 
3826 	sbuf_cat(buf, "\n");
3827 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3828 	sbuf_printf(buf,
3829 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3830 	    "                          | (this)     | (all) | (this) | (all)       \n");
3831 	for (int i = 0; i < num_entries; i++) {
3832 		sbuf_printf(buf,
3833 		    "%25s | %10d   %5d   %6d   %12d",
3834 		    ixl_switch_res_type_string(resp[i].resource_type),
3835 		    resp[i].guaranteed,
3836 		    resp[i].total,
3837 		    resp[i].used,
3838 		    resp[i].total_unalloced);
3839 		if (i < num_entries - 1)
3840 			sbuf_cat(buf, "\n");
3841 	}
3842 
3843 	error = sbuf_finish(buf);
3844 	if (error)
3845 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3846 
3847 	sbuf_delete(buf);
3848 	return (error);
3849 }
3850 
3851 enum ixl_sw_seid_offset {
3852 	IXL_SW_SEID_EMP = 1,
3853 	IXL_SW_SEID_MAC_START = 2,
3854 	IXL_SW_SEID_MAC_END = 5,
3855 	IXL_SW_SEID_PF_START = 16,
3856 	IXL_SW_SEID_PF_END = 31,
3857 	IXL_SW_SEID_VF_START = 32,
3858 	IXL_SW_SEID_VF_END = 159,
3859 };
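
/*
 * For example, under the ranges above SEID 1 names the EMP, SEID 18 is
 * rendered as PF 2 (18 - IXL_SW_SEID_PF_START) and SEID 33 as VF 1
 * (33 - IXL_SW_SEID_VF_START).
 */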
3860 
3861 /*
3862  * Caller must init and delete sbuf; this function will clear and
3863  * finish it for caller.
3864  *
3865  * Note: The SEID argument only applies for elements defined by FW at
3866  * power-on; these include the EMP, Ports, PFs and VFs.
3867  */
3868 static char *
3869 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3870 {
3871 	sbuf_clear(s);
3872 
3873 	/* If SEID is in certain ranges, then we can infer the
3874 	 * mapping of SEID to switch element.
3875 	 */
3876 	if (seid == IXL_SW_SEID_EMP) {
3877 		sbuf_cat(s, "EMP");
3878 		goto out;
3879 	} else if (seid >= IXL_SW_SEID_MAC_START &&
3880 	    seid <= IXL_SW_SEID_MAC_END) {
3881 		sbuf_printf(s, "MAC  %2d",
3882 		    seid - IXL_SW_SEID_MAC_START);
3883 		goto out;
3884 	} else if (seid >= IXL_SW_SEID_PF_START &&
3885 	    seid <= IXL_SW_SEID_PF_END) {
3886 		sbuf_printf(s, "PF  %3d",
3887 		    seid - IXL_SW_SEID_PF_START);
3888 		goto out;
3889 	} else if (seid >= IXL_SW_SEID_VF_START &&
3890 	    seid <= IXL_SW_SEID_VF_END) {
3891 		sbuf_printf(s, "VF  %3d",
3892 		    seid - IXL_SW_SEID_VF_START);
3893 		goto out;
3894 	}
3895 
3896 	switch (element_type) {
3897 	case I40E_AQ_SW_ELEM_TYPE_BMC:
3898 		sbuf_cat(s, "BMC");
3899 		break;
3900 	case I40E_AQ_SW_ELEM_TYPE_PV:
3901 		sbuf_cat(s, "PV");
3902 		break;
3903 	case I40E_AQ_SW_ELEM_TYPE_VEB:
3904 		sbuf_cat(s, "VEB");
3905 		break;
3906 	case I40E_AQ_SW_ELEM_TYPE_PA:
3907 		sbuf_cat(s, "PA");
3908 		break;
3909 	case I40E_AQ_SW_ELEM_TYPE_VSI:
3910 		sbuf_printf(s, "VSI");
3911 		break;
3912 	default:
3913 		sbuf_cat(s, "?");
3914 		break;
3915 	}
3916 
3917 out:
3918 	sbuf_finish(s);
3919 	return sbuf_data(s);
3920 }
3921 
3922 static int
3923 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3924 {
3925 	const struct i40e_aqc_switch_config_element_resp *one, *two;
3926 	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3927 	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3928 
3929 	return ((int)one->seid - (int)two->seid);
3930 }
3931 
3932 static int
3933 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3934 {
3935 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3936 	struct i40e_hw *hw = &pf->hw;
3937 	device_t dev = pf->dev;
3938 	struct sbuf *buf;
3939 	struct sbuf *nmbuf;
3940 	enum i40e_status_code status;
3941 	int error = 0;
3942 	u16 next = 0;
3943 	u8 aq_buf[I40E_AQ_LARGE_BUF];
3944 
3945 	struct i40e_aqc_switch_config_element_resp *elem;
3946 	struct i40e_aqc_get_switch_config_resp *sw_config;
3947 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3948 
3949 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3950 	if (!buf) {
3951 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3952 		return (ENOMEM);
3953 	}
3954 
3955 	status = i40e_aq_get_switch_config(hw, sw_config,
3956 	    sizeof(aq_buf), &next, NULL);
3957 	if (status) {
3958 		device_printf(dev,
3959 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
3960 		    __func__, i40e_stat_str(hw, status),
3961 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3962 		sbuf_delete(buf);
3963 		return (EIO);
3964 	}
3965 	if (next)
3966 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3967 		    __func__, next);
3968 
3969 	nmbuf = sbuf_new_auto();
3970 	if (!nmbuf) {
3971 		device_printf(dev, "Could not allocate sbuf for name output.\n");
3972 		sbuf_delete(buf);
3973 		return (ENOMEM);
3974 	}
3975 
3976 	/* Sort entries by SEID for display */
3977 	qsort(sw_config->element, sw_config->header.num_reported,
3978 	    sizeof(struct i40e_aqc_switch_config_element_resp),
3979 	    &ixl_sw_cfg_elem_seid_cmp);
3980 
3981 	sbuf_cat(buf, "\n");
3982 	/* Assuming <= 255 elements in switch */
3983 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3984 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3985 	/* Exclude:
3986 	 * Revision -- all elements are revision 1 for now
3987 	 */
3988 	sbuf_printf(buf,
3989 	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3990 	    "                |                 |                 | (uplink)\n");
3991 	for (int i = 0; i < sw_config->header.num_reported; i++) {
3992 		elem = &sw_config->element[i];
3993 
3994 		/* Row format: "%4d (%8s) | %4d (%8s) | %4d (%8s) | %8d" */
3995 		sbuf_printf(buf, "%4d", elem->seid);
3996 		sbuf_cat(buf, " ");
3997 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3998 		    elem->element_type, elem->seid));
3999 		sbuf_cat(buf, " | ");
4000 		sbuf_printf(buf, "%4d", elem->uplink_seid);
4001 		sbuf_cat(buf, " ");
4002 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4003 		    0, elem->uplink_seid));
4004 		sbuf_cat(buf, " | ");
4005 		sbuf_printf(buf, "%4d", elem->downlink_seid);
4006 		sbuf_cat(buf, " ");
4007 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4008 		    0, elem->downlink_seid));
4009 		sbuf_cat(buf, " | ");
4010 		sbuf_printf(buf, "%8d", elem->connection_type);
4011 		if (i < sw_config->header.num_reported - 1)
4012 			sbuf_cat(buf, "\n");
4013 	}
4014 	sbuf_delete(nmbuf);
4015 
4016 	error = sbuf_finish(buf);
4017 	if (error)
4018 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4019 
4020 	sbuf_delete(buf);
4021 
4022 	return (error);
4023 }
4024 
4025 static int
4026 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
4027 {
4028 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4029 	struct i40e_hw *hw = &pf->hw;
4030 	device_t dev = pf->dev;
4031 	int requested_vlan = -1;
4032 	enum i40e_status_code status = 0;
4033 	int error = 0;
4034 
4035 	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
4036 	if ((error) || (req->newptr == NULL))
4037 		return (error);
4038 
4039 	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
4040 		device_printf(dev, "Flags disallow setting of vlans\n");
4041 		return (ENODEV);
4042 	}
4043 
4044 	hw->switch_tag = requested_vlan;
4045 	device_printf(dev,
4046 	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
4047 	    hw->switch_tag, hw->first_tag, hw->second_tag);
4048 	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
4049 	if (status) {
4050 		device_printf(dev,
4051 		    "%s: aq_set_switch_config() error %s, aq error %s\n",
4052 		    __func__, i40e_stat_str(hw, status),
4053 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4054 		return (EIO);
4055 	}
4056 	return (0);
4057 }
4058 
4059 static int
4060 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4061 {
4062 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4063 	struct i40e_hw *hw = &pf->hw;
4064 	device_t dev = pf->dev;
4065 	struct sbuf *buf;
4066 	int error = 0;
4067 	enum i40e_status_code status;
4068 	u32 reg;
4069 
4070 	struct i40e_aqc_get_set_rss_key_data key_data;
4071 
4072 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4073 	if (!buf) {
4074 		device_printf(dev, "Could not allocate sbuf for output.\n");
4075 		return (ENOMEM);
4076 	}
4077 
4078 	bzero(&key_data, sizeof(key_data));
4079 
4080 	sbuf_cat(buf, "\n");
4081 	if (hw->mac.type == I40E_MAC_X722) {
4082 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4083 		if (status)
4084 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4085 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4086 	} else {
4087 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4088 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4089 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4090 		}
4091 	}
4092 
4093 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4094 
4095 	error = sbuf_finish(buf);
4096 	if (error)
4097 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4098 	sbuf_delete(buf);
4099 
4100 	return (error);
4101 }
4102 
4103 static void
4104 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4105 {
4106 	int i, j, k, width;
4107 	char c;
4108 
4109 	if (length < 1 || buf == NULL) return;
4110 
4111 	int byte_stride = 16;
4112 	int lines = length / byte_stride;
4113 	int rem = length % byte_stride;
4114 	if (rem > 0)
4115 		lines++;
4116 
4117 	for (i = 0; i < lines; i++) {
4118 		width = (rem > 0 && i == lines - 1)
4119 		    ? rem : byte_stride;
4120 
4121 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4122 
4123 		for (j = 0; j < width; j++)
4124 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4125 
4126 		if (width < byte_stride) {
4127 			for (k = 0; k < (byte_stride - width); k++)
4128 				sbuf_printf(sb, "   ");
4129 		}
4130 
4131 		if (!text) {
4132 			sbuf_printf(sb, "\n");
4133 			continue;
4134 		}
4135 
4136 		for (j = 0; j < width; j++) {
4137 			c = (char)buf[i * byte_stride + j];
4138 			if (c < 32 || c > 126)
4139 				sbuf_printf(sb, ".");
4140 			else
4141 				sbuf_printf(sb, "%c", c);
4142 
4143 			if (j == width - 1)
4144 				sbuf_printf(sb, "\n");
4145 		}
4146 	}
4147 }
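
/*
 * A sketch of the output produced above for a 20-byte buffer at label
 * offset 0 with text decoding enabled (byte values illustrative only):
 *
 *	   0 | 48 65 6c 6c 6f 2c 20 77 6f 72 6c 64 21 00 00 00 Hello, world!...
 *	  16 | de ad be ef                                     ....
 */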
4148 
4149 static int
4150 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4151 {
4152 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4153 	struct i40e_hw *hw = &pf->hw;
4154 	device_t dev = pf->dev;
4155 	struct sbuf *buf;
4156 	int error = 0;
4157 	enum i40e_status_code status;
4158 	u8 hlut[512];
4159 	u32 reg;
4160 
4161 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4162 	if (!buf) {
4163 		device_printf(dev, "Could not allocate sbuf for output.\n");
4164 		return (ENOMEM);
4165 	}
4166 
4167 	bzero(hlut, sizeof(hlut));
4168 	sbuf_cat(buf, "\n");
4169 	if (hw->mac.type == I40E_MAC_X722) {
4170 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4171 		if (status)
4172 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4173 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4174 	} else {
4175 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4176 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4177 			bcopy(&reg, &hlut[i << 2], 4);
4178 		}
4179 	}
4180 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4181 
4182 	error = sbuf_finish(buf);
4183 	if (error)
4184 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4185 	sbuf_delete(buf);
4186 
4187 	return (error);
4188 }
4189 
4190 static int
4191 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4192 {
4193 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4194 	struct i40e_hw *hw = &pf->hw;
4195 	u64 hena;
4196 
4197 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4198 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4199 
4200 	return sysctl_handle_long(oidp, NULL, hena, req);
4201 }
4202 
4203 /*
4204  * Sysctl to disable firmware's link management
4205  *
4206  * 1 - Disable link management on this port
4207  * 0 - Re-enable link management
4208  *
4209  * On normal NVMs, firmware manages link by default.
4210  */
4211 static int
4212 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4213 {
4214 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4215 	struct i40e_hw *hw = &pf->hw;
4216 	device_t dev = pf->dev;
4217 	int requested_mode = -1;
4218 	enum i40e_status_code status = 0;
4219 	int error = 0;
4220 
4221 	/* Read in new mode */
4222 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4223 	if ((error) || (req->newptr == NULL))
4224 		return (error);
4225 	/* Check for sane value */
4226 	if (requested_mode < 0 || requested_mode > 1) {
4227 		device_printf(dev, "Valid modes are 0 or 1\n");
4228 		return (EINVAL);
4229 	}
4230 
4231 	/* Set new mode */
4232 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4233 	if (status) {
4234 		device_printf(dev,
4235 		    "%s: Error setting new phy debug mode %s,"
4236 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4237 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4238 		return (EIO);
4239 	}
4240 
4241 	return (0);
4242 }
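
/*
 * Illustrative usage from userland, assuming the handler is attached
 * under the device's debug sysctl node as "disable_fw_link_management":
 *
 *	# sysctl dev.ixl.0.debug.disable_fw_link_management=1
 *
 * Writing 1 shifts into bit 4 of the PHY debug command
 * (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW), which stops firmware link
 * management until 0 is written back.
 */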
4243 
4244 /*
4245  * Read some diagnostic data from a (Q)SFP+ module
4246  *
4247  *             SFP A2   QSFP Lower Page
4248  * Temperature 96-97	22-23
4249  * Vcc         98-99    26-27
4250  * TX power    102-103  34-35..40-41
4251  * RX power    104-105  50-51..56-57
4252  */
4253 static int
4254 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4255 {
4256 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4257 	device_t dev = pf->dev;
4258 	struct sbuf *sbuf;
4259 	int error = 0;
4260 	u8 output;
4261 
4262 	if (req->oldptr == NULL) {
4263 		error = SYSCTL_OUT(req, 0, 128);
4264 		return (error);
4265 	}
4266 
4267 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4268 	if (error) {
4269 		device_printf(dev, "Error reading from i2c\n");
4270 		return (error);
4271 	}
4272 
4273 	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4274 	if (output == 0x3) {
4275 		/*
4276 		 * Check for:
4277 		 * - Internally calibrated data
4278 		 * - Diagnostic monitoring is implemented
4279 		 */
4280 		pf->read_i2c_byte(pf, 92, 0xA0, &output);
4281 		if (!(output & 0x60)) {
4282 			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4283 			return (0);
4284 		}
4285 
4286 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4287 
4288 		for (u8 offset = 96; offset < 100; offset++) {
4289 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4290 			sbuf_printf(sbuf, "%02X ", output);
4291 		}
4292 		for (u8 offset = 102; offset < 106; offset++) {
4293 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4294 			sbuf_printf(sbuf, "%02X ", output);
4295 		}
4296 	} else if (output == 0xD || output == 0x11) {
4297 		/*
4298 		 * QSFP+ modules are always internally calibrated, and must indicate
4299 		 * what types of diagnostic monitoring are implemented
4300 		 */
4301 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4302 
4303 		for (u8 offset = 22; offset < 24; offset++) {
4304 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4305 			sbuf_printf(sbuf, "%02X ", output);
4306 		}
4307 		for (u8 offset = 26; offset < 28; offset++) {
4308 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4309 			sbuf_printf(sbuf, "%02X ", output);
4310 		}
4311 		/* Read the data from the first lane */
4312 		for (u8 offset = 34; offset < 36; offset++) {
4313 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4314 			sbuf_printf(sbuf, "%02X ", output);
4315 		}
4316 		for (u8 offset = 50; offset < 52; offset++) {
4317 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4318 			sbuf_printf(sbuf, "%02X ", output);
4319 		}
4320 	} else {
4321 		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4322 		return (0);
4323 	}
4324 
4325 	sbuf_finish(sbuf);
4326 	sbuf_delete(sbuf);
4327 
4328 	return (0);
4329 }
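
/*
 * Decoding the raw bytes dumped above follows SFF-8472 (SFP) and
 * SFF-8636 (QSFP); a minimal sketch, assuming internally calibrated
 * values ("msb"/"lsb" are hypothetical names for the two bytes read at
 * each offset pair):
 *
 *	int16_t raw_temp = (msb << 8) | lsb;	// units of 1/256 degC
 *	int temp_mdegc = (int)raw_temp * 1000 / 256;
 *	uint16_t raw_vcc = (msb << 8) | lsb;	// units of 100 uV
 *	uint16_t raw_pwr = (msb << 8) | lsb;	// TX/RX power, 0.1 uW units
 */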
4330 
4331 /*
4332  * Sysctl to read a byte from I2C bus.
4333  *
4334  * Input: 32-bit value:
4335  * 	bits 0-7:   device address (0xA0 or 0xA2)
4336  * 	bits 8-15:  offset (0-255)
4337  *	bits 16-31: unused
4338  * Output: 8-bit value read
4339  */
4340 static int
4341 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4342 {
4343 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4344 	device_t dev = pf->dev;
4345 	int input = -1, error = 0;
4346 	u8 dev_addr, offset, output;
4347 
4348 	/* Read in I2C read parameters */
4349 	error = sysctl_handle_int(oidp, &input, 0, req);
4350 	if ((error) || (req->newptr == NULL))
4351 		return (error);
4352 	/* Validate device address */
4353 	dev_addr = input & 0xFF;
4354 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4355 		return (EINVAL);
4356 	}
4357 	offset = (input >> 8) & 0xFF;
4358 
4359 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4360 	if (error)
4361 		return (error);
4362 
4363 	device_printf(dev, "%02X\n", output);
4364 	return (0);
4365 }
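
/*
 * Example encoding (illustrative): to read offset 96 (0x60) of device
 * address 0xA2, userland writes (0x60 << 8) | 0xA2 = 0x60A2 to this
 * sysctl, and the byte read is printed to the console.
 */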
4366 
4367 /*
4368  * Sysctl to write a byte to the I2C bus.
4369  *
4370  * Input: 32-bit value:
4371  * 	bits 0-7:   device address (0xA0 or 0xA2)
4372  * 	bits 8-15:  offset (0-255)
4373  *	bits 16-23: value to write
4374  *	bits 24-31: unused
4375  * Output: 8-bit value written
4376  */
4377 static int
4378 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4379 {
4380 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4381 	device_t dev = pf->dev;
4382 	int input = -1, error = 0;
4383 	u8 dev_addr, offset, value;
4384 
4385 	/* Read in I2C write parameters */
4386 	error = sysctl_handle_int(oidp, &input, 0, req);
4387 	if ((error) || (req->newptr == NULL))
4388 		return (error);
4389 	/* Validate device address */
4390 	dev_addr = input & 0xFF;
4391 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4392 		return (EINVAL);
4393 	}
4394 	offset = (input >> 8) & 0xFF;
4395 	value = (input >> 16) & 0xFF;
4396 
4397 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4398 	if (error)
4399 		return (error);
4400 
4401 	device_printf(dev, "%02X written\n", value);
4402 	return (0);
4403 }
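
/*
 * Example encoding (illustrative): to write 0x55 to offset 0x7B of
 * device address 0xA0, userland supplies
 * (0x55 << 16) | (0x7B << 8) | 0xA0 = 0x557BA0.
 */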
4404 
4405 static int
4406 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4407     u8 bit_pos, int *is_set)
4408 {
4409 	device_t dev = pf->dev;
4410 	struct i40e_hw *hw = &pf->hw;
4411 	enum i40e_status_code status;
4412 
4413 	if (IXL_PF_IN_RECOVERY_MODE(pf))
4414 		return (EIO);
4415 
4416 	status = i40e_aq_get_phy_capabilities(hw,
4417 	    FALSE, FALSE, abilities, NULL);
4418 	if (status) {
4419 		device_printf(dev,
4420 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4421 		    __func__, i40e_stat_str(hw, status),
4422 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4423 		return (EIO);
4424 	}
4425 
4426 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4427 	return (0);
4428 }
4429 
4430 static int
4431 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4432     u8 bit_pos, int set)
4433 {
4434 	device_t dev = pf->dev;
4435 	struct i40e_hw *hw = &pf->hw;
4436 	struct i40e_aq_set_phy_config config;
4437 	enum i40e_status_code status;
4438 
4439 	/* Set new PHY config */
4440 	memset(&config, 0, sizeof(config));
4441 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4442 	if (set)
4443 		config.fec_config |= bit_pos;
4444 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4445 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4446 		config.phy_type = abilities->phy_type;
4447 		config.phy_type_ext = abilities->phy_type_ext;
4448 		config.link_speed = abilities->link_speed;
4449 		config.eee_capability = abilities->eee_capability;
4450 		config.eeer = abilities->eeer_val;
4451 		config.low_power_ctrl = abilities->d3_lpan;
4452 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4453 
4454 		if (status) {
4455 			device_printf(dev,
4456 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4457 			    __func__, i40e_stat_str(hw, status),
4458 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4459 			return (EIO);
4460 		}
4461 	}
4462 
4463 	return (0);
4464 }
4465 
4466 static int
4467 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4468 {
4469 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4470 	int mode, error = 0;
4471 
4472 	struct i40e_aq_get_phy_abilities_resp abilities;
4473 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4474 	if (error)
4475 		return (error);
4476 	/* Read in new mode */
4477 	error = sysctl_handle_int(oidp, &mode, 0, req);
4478 	if ((error) || (req->newptr == NULL))
4479 		return (error);
4480 
4481 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4482 }
4483 
4484 static int
4485 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4486 {
4487 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4488 	int mode, error = 0;
4489 
4490 	struct i40e_aq_get_phy_abilities_resp abilities;
4491 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4492 	if (error)
4493 		return (error);
4494 	/* Read in new mode */
4495 	error = sysctl_handle_int(oidp, &mode, 0, req);
4496 	if ((error) || (req->newptr == NULL))
4497 		return (error);
4498 
4499 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4500 }
4501 
4502 static int
4503 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4504 {
4505 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4506 	int mode, error = 0;
4507 
4508 	struct i40e_aq_get_phy_abilities_resp abilities;
4509 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4510 	if (error)
4511 		return (error);
4512 	/* Read in new mode */
4513 	error = sysctl_handle_int(oidp, &mode, 0, req);
4514 	if ((error) || (req->newptr == NULL))
4515 		return (error);
4516 
4517 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4518 }
4519 
4520 static int
4521 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4522 {
4523 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4524 	int mode, error = 0;
4525 
4526 	struct i40e_aq_get_phy_abilities_resp abilities;
4527 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4528 	if (error)
4529 		return (error);
4530 	/* Read in new mode */
4531 	error = sysctl_handle_int(oidp, &mode, 0, req);
4532 	if ((error) || (req->newptr == NULL))
4533 		return (error);
4534 
4535 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4536 }
4537 
4538 static int
4539 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4540 {
4541 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4542 	int mode, error = 0;
4543 
4544 	struct i40e_aq_get_phy_abilities_resp abilities;
4545 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4546 	if (error)
4547 		return (error);
4548 	/* Read in new mode */
4549 	error = sysctl_handle_int(oidp, &mode, 0, req);
4550 	if ((error) || (req->newptr == NULL))
4551 		return (error);
4552 
4553 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4554 }
4555 
4556 static int
4557 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4558 {
4559 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4560 	struct i40e_hw *hw = &pf->hw;
4561 	device_t dev = pf->dev;
4562 	struct sbuf *buf;
4563 	int error = 0;
4564 	enum i40e_status_code status;
4565 
4566 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4567 	if (!buf) {
4568 		device_printf(dev, "Could not allocate sbuf for output.\n");
4569 		return (ENOMEM);
4570 	}
4571 
4572 	u8 *final_buff;
4573 	/* This amount is only necessary if reading the entire cluster into memory */
4574 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4575 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4576 	if (final_buff == NULL) {
4577 		device_printf(dev, "Could not allocate memory for output.\n");
4578 		goto out;
4579 	}
4580 	int final_buff_len = 0;
4581 
4582 	u8 cluster_id = 1;
4583 	bool more = true;
4584 
4585 	u8 dump_buf[4096];
4586 	u16 curr_buff_size = 4096;
4587 	u8 curr_next_table = 0;
4588 	u32 curr_next_index = 0;
4589 
4590 	u16 ret_buff_size;
4591 	u8 ret_next_table;
4592 	u32 ret_next_index;
4593 
4594 	sbuf_cat(buf, "\n");
4595 
4596 	while (more) {
4597 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4598 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4599 		if (status) {
4600 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4601 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4602 			goto free_out;
4603 		}
4604 
4605 		/* copy info out of temp buffer */
4606 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4607 		final_buff_len += ret_buff_size;
4608 
4609 		if (ret_next_table != curr_next_table) {
4610 			/* We're done with the current table; we can dump out read data. */
4611 			sbuf_printf(buf, "%d:", curr_next_table);
4612 			int bytes_printed = 0;
4613 			while (bytes_printed < final_buff_len) {
4614 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4615 				bytes_printed += 16;
4616 			}
4617 			sbuf_cat(buf, "\n");
4618 
4619 			/* The entire cluster has been read; we're finished */
4620 			if (ret_next_table == 0xFF)
4621 				break;
4622 
4623 			/* Otherwise clear the output buffer and continue reading */
4624 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4625 			final_buff_len = 0;
4626 		}
4627 
4628 		if (ret_next_index == 0xFFFFFFFF)
4629 			ret_next_index = 0;
4630 
4631 		bzero(dump_buf, sizeof(dump_buf));
4632 		curr_next_table = ret_next_table;
4633 		curr_next_index = ret_next_index;
4634 	}
4635 
4636 free_out:
4637 	free(final_buff, M_IXL);
4638 out:
4639 	error = sbuf_finish(buf);
4640 	if (error)
4641 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4642 	sbuf_delete(buf);
4643 
4644 	return (error);
4645 }
4646 
4647 static int
4648 ixl_start_fw_lldp(struct ixl_pf *pf)
4649 {
4650 	struct i40e_hw *hw = &pf->hw;
4651 	enum i40e_status_code status;
4652 
4653 	status = i40e_aq_start_lldp(hw, false, NULL);
4654 	if (status != I40E_SUCCESS) {
4655 		switch (hw->aq.asq_last_status) {
4656 		case I40E_AQ_RC_EEXIST:
4657 			device_printf(pf->dev,
4658 			    "FW LLDP agent is already running\n");
4659 			break;
4660 		case I40E_AQ_RC_EPERM:
4661 			device_printf(pf->dev,
4662 			    "Device configuration forbids SW from starting "
4663 			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4664 			    "attribute to \"Enabled\" to use this sysctl\n");
4665 			return (EINVAL);
4666 		default:
4667 			device_printf(pf->dev,
4668 			    "Starting FW LLDP agent failed: error: %s, %s\n",
4669 			    i40e_stat_str(hw, status),
4670 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4671 			return (EINVAL);
4672 		}
4673 	}
4674 
4675 	ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4676 	return (0);
4677 }
4678 
4679 static int
4680 ixl_stop_fw_lldp(struct ixl_pf *pf)
4681 {
4682 	struct i40e_hw *hw = &pf->hw;
4683 	device_t dev = pf->dev;
4684 	enum i40e_status_code status;
4685 
4686 	if (hw->func_caps.npar_enable != 0) {
4687 		device_printf(dev,
4688 		    "Disabling FW LLDP agent is not supported on this device\n");
4689 		return (EINVAL);
4690 	}
4691 
4692 	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4693 		device_printf(dev,
4694 		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4695 		return (EINVAL);
4696 	}
4697 
4698 	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4699 	if (status != I40E_SUCCESS) {
4700 		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4701 			device_printf(dev,
4702 			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4703 			    i40e_stat_str(hw, status),
4704 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4705 			return (EINVAL);
4706 		}
4707 
4708 		device_printf(dev, "FW LLDP agent is already stopped\n");
4709 	}
4710 
4711 	i40e_aq_set_dcb_parameters(hw, true, NULL);
4712 	ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4713 	return (0);
4714 }
4715 
4716 static int
4717 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4718 {
4719 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4720 	int state, new_state, error = 0;
4721 
4722 	state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4723 
4724 	/* Read in new mode */
4725 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4726 	if ((error) || (req->newptr == NULL))
4727 		return (error);
4728 
4729 	/* Already in requested state */
4730 	if (new_state == state)
4731 		return (error);
4732 
4733 	if (new_state == 0)
4734 		return ixl_stop_fw_lldp(pf);
4735 
4736 	return ixl_start_fw_lldp(pf);
4737 }
4738 
4739 static int
4740 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4741 {
4742 	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4743 	int                   state, new_state;
4744 	int                   sysctl_handle_status = 0;
4745 	enum i40e_status_code cmd_status;
4746 
4747 	/* Init states' values */
4748 	state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4749 
4750 	/* Get requested mode */
4751 	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4752 	if ((sysctl_handle_status) || (req->newptr == NULL))
4753 		return (sysctl_handle_status);
4754 
4755 	/* Check if state has changed */
4756 	if (new_state == state)
4757 		return (0);
4758 
4759 	/* Set new state */
4760 	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4761 
4762 	/* Save new state or report error */
4763 	if (!cmd_status) {
4764 		if (new_state == 0)
4765 			ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4766 		else
4767 			ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4768 	} else if (cmd_status == I40E_ERR_CONFIG)
4769 		return (EPERM);
4770 	else
4771 		return (EIO);
4772 
4773 	return (0);
4774 }
4775 
4776 static int
4777 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4778 {
4779 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4780 	int error, state;
4781 
4782 	state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4783 
4784 	error = sysctl_handle_int(oidp, &state, 0, req);
4785 	if ((error) || (req->newptr == NULL))
4786 		return (error);
4787 
4788 	if (state == 0)
4789 		ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4790 	else
4791 		ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4792 
4793 	return (0);
4794 }
4795 
4796 
4797 int
4798 ixl_attach_get_link_status(struct ixl_pf *pf)
4799 {
4800 	struct i40e_hw *hw = &pf->hw;
4801 	device_t dev = pf->dev;
4802 	enum i40e_status_code status;
4803 
4804 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4805 	    (hw->aq.fw_maj_ver < 4)) {
4806 		i40e_msec_delay(75);
4807 		status = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4808 		if (status != I40E_SUCCESS) {
4809 			device_printf(dev,
4810 			    "%s link restart failed status: %s, aq_err=%s\n",
4811 			    __func__, i40e_stat_str(hw, status),
4812 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4813 			return (EINVAL);
4814 		}
4815 	}
4816 
4817 	/* Determine link state */
4818 	hw->phy.get_link_info = TRUE;
4819 	status = i40e_get_link_status(hw, &pf->link_up);
4820 	if (status != I40E_SUCCESS) {
4821 		device_printf(dev,
4822 		    "%s get link status, status: %s aq_err=%s\n",
4823 		    __func__, i40e_stat_str(hw, status),
4824 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4825 		/*
4826 		 * Most probably FW has not finished configuring PHY.
4827 		 * Retry periodically in a timer callback.
4828 		 */
4829 		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
4830 		pf->link_poll_start = getsbinuptime();
4831 		return (EAGAIN);
4832 	}
4833 	ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up);
4834 
4835 	/* Flow Control mode not set by user, read current FW settings */
4836 	if (pf->fc == -1)
4837 		pf->fc = hw->fc.current_mode;
4838 
4839 	return (0);
4840 }
4841 
4842 static int
4843 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4844 {
4845 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4846 	int requested = 0, error = 0;
4847 
4848 	/* Read in new mode */
4849 	error = sysctl_handle_int(oidp, &requested, 0, req);
4850 	if ((error) || (req->newptr == NULL))
4851 		return (error);
4852 
4853 	/* Initiate the PF reset later in the admin task */
4854 	ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4855 
4856 	return (error);
4857 }
4858 
4859 static int
4860 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4861 {
4862 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4863 	struct i40e_hw *hw = &pf->hw;
4864 	int requested = 0, error = 0;
4865 
4866 	/* Read in new mode */
4867 	error = sysctl_handle_int(oidp, &requested, 0, req);
4868 	if ((error) || (req->newptr == NULL))
4869 		return (error);
4870 
4871 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4872 
4873 	return (error);
4874 }
4875 
4876 static int
4877 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4878 {
4879 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4880 	struct i40e_hw *hw = &pf->hw;
4881 	int requested = 0, error = 0;
4882 
4883 	/* Read in new mode */
4884 	error = sysctl_handle_int(oidp, &requested, 0, req);
4885 	if ((error) || (req->newptr == NULL))
4886 		return (error);
4887 
4888 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4889 
4890 	return (error);
4891 }
4892 
4893 /*
4894  * Print out mapping of TX queue indexes and Rx queue indexes
4895  * to MSI-X vectors.
4896  */
4897 static int
4898 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4899 {
4900 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4901 	struct ixl_vsi *vsi = &pf->vsi;
4902 	device_t dev = pf->dev;
4903 	struct sbuf *buf;
4904 	int error = 0;
4905 
4906 	struct ixl_rx_queue *rx_que;
4907 	struct ixl_tx_queue *tx_que;
4908 
4909 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4910 	if (!buf) {
4911 		device_printf(dev, "Could not allocate sbuf for output.\n");
4912 		return (ENOMEM);
4913 	}
4914 
4915 	sbuf_cat(buf, "\n");
4916 	for (int i = 0; i < vsi->num_rx_queues; i++) {
4917 		rx_que = &vsi->rx_queues[i];
4918 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4919 	}
4920 	for (int i = 0; i < vsi->num_tx_queues; i++) {
4921 		tx_que = &vsi->tx_queues[i];
4922 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4923 	}
4924 
4925 	error = sbuf_finish(buf);
4926 	if (error)
4927 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4928 	sbuf_delete(buf);
4929 
4930 	return (error);
4931 }
4932