xref: /freebsd/sys/dev/ixl/ixl_pf_main.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "ixl_pf.h"
37 
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41 
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46 
47 static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54 
55 /* Sysctls */
56 static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63 
64 static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65 
66 /* Debug Sysctls */
67 static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68 static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69 static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
70 static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
71 static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
72 static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
73 static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
74 static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
75 static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
76 static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
77 static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
78 static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
80 static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
81 static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
82 static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
83 static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
84 static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
85 static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
86 static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
87 
88 /* Debug Sysctls */
89 static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
90 static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
91 static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
92 static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
93 #ifdef IXL_DEBUG
94 static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
95 static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
96 #endif
97 
98 #ifdef IXL_IW
99 extern int ixl_enable_iwarp;
100 extern int ixl_limit_iwarp_msix;
101 #endif
102 
/*
 * Human-readable flow-control mode names.  Indices 0-3 (None/Rx/Tx/Full)
 * are selected from the link pause bits in ixl_link_up_msg().
 */
static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
111 
/*
 * FEC mode names used in link messages: [0] RS-FEC, [1] FC-FEC/BASE-R,
 * [2] no FEC.  Selection logic lives in ixl_link_up_msg().
 */
static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};
117 
118 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
119 
120 /*
121 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
122 */
123 void
124 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
125 {
126 	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
127 	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
128 	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
129 
130 	sbuf_printf(buf,
131 	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
132 	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
133 	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
134 	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
135 	    IXL_NVM_VERSION_HI_SHIFT,
136 	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
137 	    IXL_NVM_VERSION_LO_SHIFT,
138 	    hw->nvm.eetrack,
139 	    oem_ver, oem_build, oem_patch);
140 }
141 
142 void
143 ixl_print_nvm_version(struct ixl_pf *pf)
144 {
145 	struct i40e_hw *hw = &pf->hw;
146 	device_t dev = pf->dev;
147 	struct sbuf *sbuf;
148 
149 	sbuf = sbuf_new_auto();
150 	ixl_nvm_version_str(hw, sbuf);
151 	sbuf_finish(sbuf);
152 	device_printf(dev, "%s\n", sbuf_data(sbuf));
153 	sbuf_delete(sbuf);
154 }
155 
/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify state of FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	/* Debug builds may force recovery mode via the recovery_mode flag. */
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	/* Read the FWS1B field of the FW status register. */
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		/* An FWS1B value in the EMPR_0..EMPR_10 range indicates an
		 * unexpected EMP reset condition. */
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}
192 
/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success (including when the device is in recovery
 * mode and the reset is skipped), or EIO on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		/* Record recovery mode in the PF state flags for the rest
		 * of the driver to observe. */
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		/* Entering recovery mode is not treated as a failure. */
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}
238 
239 /**
240  * ixl_setup_hmc - Setup LAN Host Memory Cache
241  * @pf: PF structure
242  *
243  * Init and configure LAN Host Memory Cache
244  *
245  * @returns 0 on success, EIO on error
246  */
247 int
248 ixl_setup_hmc(struct ixl_pf *pf)
249 {
250 	struct i40e_hw *hw = &pf->hw;
251 	enum i40e_status_code status;
252 
253 	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
254 	    hw->func_caps.num_rx_qp, 0, 0);
255 	if (status) {
256 		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
257 		    i40e_stat_str(hw, status));
258 		return (EIO);
259 	}
260 
261 	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
262 	if (status) {
263 		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
264 		    i40e_stat_str(hw, status));
265 		return (EIO);
266 	}
267 
268 	return (0);
269 }
270 
271 /**
272  * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
273  * @pf: PF structure
274  *
275  * Shutdown Host Memory Cache if configured.
276  *
277  */
278 void
279 ixl_shutdown_hmc(struct ixl_pf *pf)
280 {
281 	struct i40e_hw *hw = &pf->hw;
282 	enum i40e_status_code status;
283 
284 	/* HMC not configured, no need to shutdown */
285 	if (hw->hmc.hmc_obj == NULL)
286 		return;
287 
288 	status = i40e_shutdown_lan_hmc(hw);
289 	if (status)
290 		device_printf(pf->dev,
291 		    "Shutdown LAN HMC failed with code %s\n",
292 		    i40e_stat_str(hw, status));
293 }
/*
 * Write PF ITR values to queue ITR registers.
 * Programs both the Tx and Rx interrupt throttling values immediately.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
303 
304 /*********************************************************************
305  *
306  *  Get the hardware capabilities
307  *
308  **********************************************************************/
309 
310 int
311 ixl_get_hw_capabilities(struct ixl_pf *pf)
312 {
313 	struct i40e_aqc_list_capabilities_element_resp *buf;
314 	struct i40e_hw	*hw = &pf->hw;
315 	device_t 	dev = pf->dev;
316 	enum i40e_status_code status;
317 	int len, i2c_intfc_num;
318 	bool again = TRUE;
319 	u16 needed;
320 
321 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
322 		hw->func_caps.iwarp = 0;
323 		return (0);
324 	}
325 
326 	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
327 retry:
328 	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
329 	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
330 		device_printf(dev, "Unable to allocate cap memory\n");
331                 return (ENOMEM);
332 	}
333 
334 	/* This populates the hw struct */
335         status = i40e_aq_discover_capabilities(hw, buf, len,
336 	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
337 	free(buf, M_DEVBUF);
338 	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
339 	    (again == TRUE)) {
340 		/* retry once with a larger buffer */
341 		again = FALSE;
342 		len = needed;
343 		goto retry;
344 	} else if (status != I40E_SUCCESS) {
345 		device_printf(dev, "capability discovery failed; status %s, error %s\n",
346 		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
347 		return (ENODEV);
348 	}
349 
350 	/*
351 	 * Some devices have both MDIO and I2C; since this isn't reported
352 	 * by the FW, check registers to see if an I2C interface exists.
353 	 */
354 	i2c_intfc_num = ixl_find_i2c_interface(pf);
355 	if (i2c_intfc_num != -1)
356 		pf->has_i2c = true;
357 
358 	/* Determine functions to use for driver I2C accesses */
359 	switch (pf->i2c_access_method) {
360 	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
361 		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
362 			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
363 			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
364 		} else {
365 			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
366 			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
367 		}
368 		break;
369 	}
370 	case IXL_I2C_ACCESS_METHOD_AQ:
371 		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
372 		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
373 		break;
374 	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
375 		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
376 		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
377 		break;
378 	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
379 		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
380 		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
381 		break;
382 	default:
383 		/* Should not happen */
384 		device_printf(dev, "Error setting I2C access functions\n");
385 		break;
386 	}
387 
388 	/* Print a subset of the capability information. */
389 	device_printf(dev,
390 	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
391 	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
392 	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
393 	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
394 	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
395 	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
396 	    "MDIO shared");
397 
398 	return (0);
399 }
400 
401 /* For the set_advertise sysctl */
402 void
403 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
404 {
405 	device_t dev = pf->dev;
406 	int err;
407 
408 	/* Make sure to initialize the device to the complete list of
409 	 * supported speeds on driver load, to ensure unloading and
410 	 * reloading the driver will restore this value.
411 	 */
412 	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
413 	if (err) {
414 		/* Non-fatal error */
415 		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
416 			      __func__, err);
417 		return;
418 	}
419 
420 	pf->advertised_speed =
421 	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
422 }
423 
/*
 * Tear down HW structures: shut down the LAN HMC (if configured), then
 * the admin queue, and release the PF's queue allocation.
 *
 * @returns 0 on success, otherwise the i40e status code of the failed
 * shutdown step.
 * NOTE(review): the return value mixes i40e_status_code with the
 * errno-style ints used elsewhere in this file -- confirm callers only
 * test for non-zero.
 */
int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			/* On HMC failure, skip AQ shutdown and queue release. */
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	/* Return the PF's queues to the device-wide queue manager. */
	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}
454 
455 static u_int
456 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
457 {
458 	struct ixl_vsi *vsi = arg;
459 
460 	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
461 
462 	return (1);
463 }
464 
/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
/*
 * Synchronize the HW multicast filters with the interface's link-level
 * multicast addresses.  If there are too many addresses to filter
 * individually, fall back to multicast promiscuous mode instead.
 */
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	/* Queue an ADD for each lladdr, then push the batch to HW. */
	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}
501 
502 static u_int
503 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
504 {
505 	struct ixl_mac_filter *f = arg;
506 
507 	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
508 		return (1);
509 	else
510 		return (0);
511 }
512 
/*
 * Mark for deletion any HW multicast filter whose address is no longer
 * present on the interface, then push the deletions to HW.
 *
 * @returns the number of filters marked for deletion.
 */
int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	SLIST_FOREACH(f, &vsi->ftl, next)
		if ((f->flags & IXL_FILTER_USED) &&
		    (f->flags & IXL_FILTER_MC) &&
		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
			/* No interface lladdr matched: schedule removal. */
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}
536 
/*
 * Log a NOTICE message describing the established link: speed,
 * requested and negotiated FEC, autonegotiation state, and flow
 * control mode.
 */
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	/* Negotiated FEC: RS takes precedence over KR if both bits set. */
	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	/* Flow control: Full when both pause bits are set, else Tx/Rx/None. */
	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
}
572 
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 * Vector 0 is reserved for the admin queue and "other" causes; queue
 * interrupts use the remaining MSI-X vectors.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* Enable the misc/error causes routed to vector 0. */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	/* Program vector 0 dynamic control: SW ITR index + INTENA_MSK. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
611 
612 void
613 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
614 {
615 	/* Display supported media types */
616 	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
617 		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
618 
619 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
620 		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
621 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
622 		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
623 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
624 		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
625 
626 	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
627 		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
628 
629 	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
630 		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
631 
632 	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
633 	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
634 	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
635 		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
636 
637 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
638 		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
639 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
640 		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
641 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
642 		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
643 
644 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
645 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
646 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
647 	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
648 	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
649 		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
650 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
651 		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
652 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
653 		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
654 
655 	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
656 		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
657 
658 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
659 	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
660 		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
661 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
662 		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
663 	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
664 		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
665 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
666 		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
667 	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
668 		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
669 
670 	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
671 		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
672 
673 	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
674 		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
675 	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
676 		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
677 
678 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
679 		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
680 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
681 		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
682 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
683 		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
684 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
685 		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
686 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
687 		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
688 	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
689 		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
690 }
691 
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 *  Queries the FW switch topology and records the first element's
 *  SEIDs in the single PF VSI.  Returns 0 or the AQ error code.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t 	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	/* Fetch the switch configuration into a stack buffer. */
	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		/* Optionally dump every reported switch element. */
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}
740 
741 void
742 ixl_free_mac_filters(struct ixl_vsi *vsi)
743 {
744 	struct ixl_mac_filter *f;
745 
746 	while (!SLIST_EMPTY(&vsi->ftl)) {
747 		f = SLIST_FIRST(&vsi->ftl);
748 		SLIST_REMOVE_HEAD(&vsi->ftl, next);
749 		free(f, M_DEVBUF);
750 	}
751 
752 	vsi->num_hw_filters = 0;
753 }
754 
/*
 * Create the VSI's sysctl node (named sysctl_name) under the device's
 * sysctl tree, attach ethernet statistics to it, and optionally the
 * per-queue statistics as well.
 */
void
ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
			CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}
773 
774 /*
775  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
776  * Writes to the ITR registers immediately.
777  */
778 static int
779 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
780 {
781 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
782 	device_t dev = pf->dev;
783 	int error = 0;
784 	int requested_tx_itr;
785 
786 	requested_tx_itr = pf->tx_itr;
787 	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
788 	if ((error) || (req->newptr == NULL))
789 		return (error);
790 	if (pf->dynamic_tx_itr) {
791 		device_printf(dev,
792 		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
793 		    return (EINVAL);
794 	}
795 	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
796 		device_printf(dev,
797 		    "Invalid TX itr value; value must be between 0 and %d\n",
798 		        IXL_MAX_ITR);
799 		return (EINVAL);
800 	}
801 
802 	pf->tx_itr = requested_tx_itr;
803 	ixl_configure_tx_itr(pf);
804 
805 	return (error);
806 }
807 
808 /*
809  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
810  * Writes to the ITR registers immediately.
811  */
812 static int
813 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
814 {
815 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
816 	device_t dev = pf->dev;
817 	int error = 0;
818 	int requested_rx_itr;
819 
820 	requested_rx_itr = pf->rx_itr;
821 	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
822 	if ((error) || (req->newptr == NULL))
823 		return (error);
824 	if (pf->dynamic_rx_itr) {
825 		device_printf(dev,
826 		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
827 		    return (EINVAL);
828 	}
829 	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
830 		device_printf(dev,
831 		    "Invalid RX itr value; value must be between 0 and %d\n",
832 		        IXL_MAX_ITR);
833 		return (EINVAL);
834 	}
835 
836 	pf->rx_itr = requested_rx_itr;
837 	ixl_configure_rx_itr(pf);
838 
839 	return (error);
840 }
841 
/*
 * Create the "mac" sysctl node under the given parent and populate it
 * with the port-level MAC statistics, including the embedded ethernet
 * statistics.  The ctls[] table is terminated by an all-zero entry.
 */
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	/* Name/description table for the remaining u64 counters. */
	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	/* Register one read-only u64 sysctl per table entry. */
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}
}
900 
901 void
902 ixl_set_rss_key(struct ixl_pf *pf)
903 {
904 	struct i40e_hw *hw = &pf->hw;
905 	struct ixl_vsi *vsi = &pf->vsi;
906 	device_t	dev = pf->dev;
907 	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
908 	enum i40e_status_code status;
909 
910 #ifdef RSS
911         /* Fetch the configured RSS key */
912         rss_getkey((uint8_t *) &rss_seed);
913 #else
914 	ixl_get_default_rss_key(rss_seed);
915 #endif
916 	/* Fill out hash function seed */
917 	if (hw->mac.type == I40E_MAC_X722) {
918 		struct i40e_aqc_get_set_rss_key_data key_data;
919 		bcopy(rss_seed, &key_data, 52);
920 		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
921 		if (status)
922 			device_printf(dev,
923 			    "i40e_aq_set_rss_key status %s, error %s\n",
924 			    i40e_stat_str(hw, status),
925 			    i40e_aq_str(hw, hw->aq.asq_last_status));
926 	} else {
927 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
928 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
929 	}
930 }
931 
932 /*
933  * Configure enabled PCTYPES for RSS.
934  */
935 void
936 ixl_set_rss_pctypes(struct ixl_pf *pf)
937 {
938 	struct i40e_hw *hw = &pf->hw;
939 	u64		set_hena = 0, hena;
940 
941 #ifdef RSS
942 	u32		rss_hash_config;
943 
944 	rss_hash_config = rss_gethashconfig();
945 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
946                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
947 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
948                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
949 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
950                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
951 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
952                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
953 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
954 		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
955 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
956                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
957         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
958                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
959 #else
960 	if (hw->mac.type == I40E_MAC_X722)
961 		set_hena = IXL_DEFAULT_RSS_HENA_X722;
962 	else
963 		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
964 #endif
965 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
966 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
967 	hena |= set_hena;
968 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
969 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
970 
971 }
972 
/*
** Setup the PF's RSS parameters: program the hash key, enable the
** desired packet classification types, then fill the queue lookup
** table.  The order mirrors the hardware dependency: the key and
** PCTYPEs must be valid before the LUT steers traffic.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}
983 
984 /*
985  * In some firmware versions there is default MAC/VLAN filter
986  * configured which interferes with filters managed by driver.
987  * Make sure it's removed.
988  */
989 void
990 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
991 {
992 	struct i40e_aqc_remove_macvlan_element_data e;
993 
994 	bzero(&e, sizeof(e));
995 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
996 	e.vlan_tag = 0;
997 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
998 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
999 
1000 	bzero(&e, sizeof(e));
1001 	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1002 	e.vlan_tag = 0;
1003 	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1004 		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1005 	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1006 }
1007 
/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	/* The remaining setup applies only to non-VF VSIs */
	if (IXL_VSI_IS_VF(vsi))
		return;

	/* Remove any firmware-default MAC/VLAN filter that would
	 * conflict with driver-managed filters */
	ixl_del_default_hw_filters(vsi);

	/* Install a filter for this VSI's MAC, matching any VLAN */
	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	/* Debug builds make the Tx FC drop filter opt-in via sysctl */
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}
1047 
1048 /*
1049 ** This routine adds mulicast filters
1050 */
1051 void
1052 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1053 {
1054 	struct ixl_mac_filter *f;
1055 
1056 	/* Does one already exist */
1057 	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1058 	if (f != NULL)
1059 		return;
1060 
1061 	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1062 	if (f != NULL)
1063 		f->flags |= IXL_FILTER_MC;
1064 	else
1065 		printf("WARNING: no filter available!!\n");
1066 }
1067 
/*
 * Re-add every software filter flagged IXL_FILTER_USED to the
 * hardware, restoring vsi->num_macs entries in one AQ batch.
 */
void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}
1073 
/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;

	pf = vsi->back;
	dev = pf->dev;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f != NULL)
		return;
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
			/* Bounded recursion: re-enters once with vlan 0 */
			ixl_add_filter(vsi, macaddr, 0);
		}
	}

	f = ixl_new_filter(vsi, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	/* VLAN-specific entries get IXL_FILTER_VLAN; only the
	 * VLAN-agnostic (ANY) entries are counted in num_macs */
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/* Push just this new filter out to the hardware */
	f->flags |= IXL_FILTER_USED;
	ixl_add_hw_filters(vsi, f->flags, 1);
}
1122 
1123 void
1124 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1125 {
1126 	struct ixl_mac_filter *f;
1127 
1128 	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1129 	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1130 	    MAC_FORMAT_ARGS(macaddr), vlan);
1131 
1132 	f = ixl_find_filter(vsi, macaddr, vlan);
1133 	if (f == NULL)
1134 		return;
1135 
1136 	f->flags |= IXL_FILTER_DEL;
1137 	ixl_del_hw_filters(vsi, 1);
1138 	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1139 		vsi->num_macs--;
1140 
1141 	/* Check if this is the last vlan removal */
1142 	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1143 		/* Switch back to a non-vlan filter */
1144 		ixl_del_filter(vsi, macaddr, 0);
1145 		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1146 	}
1147 	return;
1148 }
1149 
1150 /*
1151 ** Find the filter with both matching mac addr and vlan id
1152 */
1153 struct ixl_mac_filter *
1154 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1155 {
1156 	struct ixl_mac_filter	*f;
1157 
1158 	SLIST_FOREACH(f, &vsi->ftl, next) {
1159 		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
1160 		    && (f->vlan == vlan)) {
1161 			return (f);
1162 		}
1163 	}
1164 
1165 	return (NULL);
1166 }
1167 
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** 'flags' selects which software filters to push (a filter matches
** when it has ALL of the given flag bits); at most 'cnt' matching
** entries are sent in a single AQ batch.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf,
	    "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	/* Zeroed AQ element array, one slot per filter to push */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & flags) == flags) {
			b = &a[j]; /* next AQ element slot */
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			/* VLAN-agnostic filters tell HW to ignore the tag */
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = CPU_TO_LE16(
				    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
			} else {
				b->vlan_tag = CPU_TO_LE16(f->vlan);
				b->flags = 0;
			}
			b->flags |= CPU_TO_LE16(
			    I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (status)
			device_printf(dev, "i40e_aq_add_macvlan status %s, "
			    "error %s\n", i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		else
			/* Only count filters the AQ accepted */
			vsi->num_hw_filters += j;
	}
	free(a, M_DEVBUF);
	return;
}
1243 
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
**
** Software filters flagged IXL_FILTER_DEL (up to 'cnt' of them) are
** unlinked from the list and freed here; callers must not touch
** those filter pointers afterwards.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	/* Zeroed AQ element array, one slot per filter to remove */
	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	/* _SAFE variant: entries are removed while iterating */
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; /* next AQ element slot */
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			/* VLAN-agnostic filters tell HW to ignore the tag */
			if (f->vlan == IXL_VLAN_ANY) {
				e->vlan_tag = 0;
				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				e->vlan_tag = f->vlan;
			}

			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));

			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		if (status) {
			/* On partial failure, count per-element successes
			 * (error_code == 0) so the HW filter count stays
			 * accurate */
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->num_hw_filters -= sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, error %s\n",
			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
		} else
			vsi->num_hw_filters -= j;
	}
	free(d, M_DEVBUF);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
	return;
}
1314 
1315 int
1316 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1317 {
1318 	struct i40e_hw	*hw = &pf->hw;
1319 	int		error = 0;
1320 	u32		reg;
1321 	u16		pf_qidx;
1322 
1323 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1324 
1325 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1326 	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1327 	    pf_qidx, vsi_qidx);
1328 
1329 	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1330 
1331 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1332 	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1333 	    I40E_QTX_ENA_QENA_STAT_MASK;
1334 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1335 	/* Verify the enable took */
1336 	for (int j = 0; j < 10; j++) {
1337 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1338 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1339 			break;
1340 		i40e_usec_delay(10);
1341 	}
1342 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1343 		device_printf(pf->dev, "TX queue %d still disabled!\n",
1344 		    pf_qidx);
1345 		error = ETIMEDOUT;
1346 	}
1347 
1348 	return (error);
1349 }
1350 
1351 int
1352 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1353 {
1354 	struct i40e_hw	*hw = &pf->hw;
1355 	int		error = 0;
1356 	u32		reg;
1357 	u16		pf_qidx;
1358 
1359 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1360 
1361 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1362 	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1363 	    pf_qidx, vsi_qidx);
1364 
1365 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1366 	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1367 	    I40E_QRX_ENA_QENA_STAT_MASK;
1368 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1369 	/* Verify the enable took */
1370 	for (int j = 0; j < 10; j++) {
1371 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1372 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1373 			break;
1374 		i40e_usec_delay(10);
1375 	}
1376 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1377 		device_printf(pf->dev, "RX queue %d still disabled!\n",
1378 		    pf_qidx);
1379 		error = ETIMEDOUT;
1380 	}
1381 
1382 	return (error);
1383 }
1384 
1385 int
1386 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1387 {
1388 	int error = 0;
1389 
1390 	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1391 	/* Called function already prints error message */
1392 	if (error)
1393 		return (error);
1394 	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1395 	return (error);
1396 }
1397 
1398 /*
1399  * Returns error on first ring that is detected hung.
1400  */
1401 int
1402 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1403 {
1404 	struct i40e_hw	*hw = &pf->hw;
1405 	int		error = 0;
1406 	u32		reg;
1407 	u16		pf_qidx;
1408 
1409 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1410 
1411 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1412 	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1413 	    pf_qidx, vsi_qidx);
1414 
1415 	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1416 	i40e_usec_delay(500);
1417 
1418 	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1419 	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1420 	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1421 	/* Verify the disable took */
1422 	for (int j = 0; j < 10; j++) {
1423 		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1424 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1425 			break;
1426 		i40e_msec_delay(10);
1427 	}
1428 	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1429 		device_printf(pf->dev, "TX queue %d still enabled!\n",
1430 		    pf_qidx);
1431 		error = ETIMEDOUT;
1432 	}
1433 
1434 	return (error);
1435 }
1436 
1437 /*
1438  * Returns error on first ring that is detected hung.
1439  */
1440 int
1441 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1442 {
1443 	struct i40e_hw	*hw = &pf->hw;
1444 	int		error = 0;
1445 	u32		reg;
1446 	u16		pf_qidx;
1447 
1448 	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1449 
1450 	ixl_dbg(pf, IXL_DBG_EN_DIS,
1451 	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1452 	    pf_qidx, vsi_qidx);
1453 
1454 	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1455 	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1456 	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1457 	/* Verify the disable took */
1458 	for (int j = 0; j < 10; j++) {
1459 		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1460 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1461 			break;
1462 		i40e_msec_delay(10);
1463 	}
1464 	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1465 		device_printf(pf->dev, "RX queue %d still enabled!\n",
1466 		    pf_qidx);
1467 		error = ETIMEDOUT;
1468 	}
1469 
1470 	return (error);
1471 }
1472 
1473 int
1474 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1475 {
1476 	int error = 0;
1477 
1478 	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1479 	/* Called function already prints error message */
1480 	if (error)
1481 		return (error);
1482 	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1483 	return (error);
1484 }
1485 
/*
 * Decode and report a TX Malicious Driver Detection (MDD) event.
 *
 * Reads the global GL_MDET_TX register for the event details, then
 * checks the per-PF and per-VF detection registers to attribute the
 * event; all of these registers are write-cleared here.  Note that
 * pf_mdet_num/vp_mdet_num are only assigned when the corresponding
 * *_mdd_detected flag is set, which every print branch below checks.
 */
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		/* Write-clear the global event register */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	/* Nothing pending; leave the per-function registers alone */
	if (!mdd_detected)
		return;

	/* Check if the event was attributed to this PF */
	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}
1558 
/*
 * Decode and report an RX Malicious Driver Detection (MDD) event.
 *
 * Same flow as the TX handler, except GL_MDET_RX carries a function
 * number rather than an explicit VF number, so VF attribution relies
 * solely on the per-VF VP_MDET_RX registers.  pf_mdet_num/vp_mdet_num
 * are only assigned when the matching *_mdd_detected flag is set,
 * which every print branch below checks.
 */
static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		/* Write-clear the global event register */
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* Nothing pending; leave the per-function registers alone */
	if (!mdd_detected)
		return;

	/* Check if the event was attributed to this PF */
	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}
1632 
1633 /**
1634  * ixl_handle_mdd_event
1635  *
1636  * Called from interrupt handler to identify possibly malicious vfs
1637  * (But also detects events from the PF, as well)
1638  **/
1639 void
1640 ixl_handle_mdd_event(struct ixl_pf *pf)
1641 {
1642 	struct i40e_hw *hw = &pf->hw;
1643 	u32 reg;
1644 
1645 	/*
1646 	 * Handle both TX/RX because it's possible they could
1647 	 * both trigger in the same interrupt.
1648 	 */
1649 	ixl_handle_tx_mdd_event(pf);
1650 	ixl_handle_rx_mdd_event(pf);
1651 
1652 	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1653 
1654 	/* re-enable mdd interrupt cause */
1655 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1656 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1657 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1658 	ixl_flush(hw);
1659 }
1660 
1661 void
1662 ixl_enable_intr0(struct i40e_hw *hw)
1663 {
1664 	u32		reg;
1665 
1666 	/* Use IXL_ITR_NONE so ITR isn't updated here */
1667 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1668 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1669 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1670 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1671 }
1672 
1673 void
1674 ixl_disable_intr0(struct i40e_hw *hw)
1675 {
1676 	u32		reg;
1677 
1678 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1679 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1680 	ixl_flush(hw);
1681 }
1682 
1683 void
1684 ixl_enable_queue(struct i40e_hw *hw, int id)
1685 {
1686 	u32		reg;
1687 
1688 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1689 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1690 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1691 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1692 }
1693 
1694 void
1695 ixl_disable_queue(struct i40e_hw *hw, int id)
1696 {
1697 	u32		reg;
1698 
1699 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1700 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1701 }
1702 
/*
 * Handle an EMP (firmware) reset: quiesce the driver, reset the PF,
 * detect firmware recovery mode, and rebuild driver/hardware state.
 * The interface is restored to its pre-reset up/down state.
 */
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	/* Remember whether the interface was running so the rebuild
	 * can restore it */
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	ixl_pf_reset(pf);

	/* Firmware may have entered recovery mode across the reset */
	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	/* Reset complete; allow normal operation again */
	atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
}
1731 
/*
 * ixl_update_stats_counters - refresh all PF-wide statistics
 *
 * Reads the port-level (GLPRT) hardware counters into pf->stats,
 * pairing each with its saved baseline in pf->stats_offsets (the
 * ixl_stat_update32/48 helpers take both).  48-bit counters live in
 * H/L register pairs; 32-bit ones in a single register.  Afterwards
 * the PF VSI stats and the eth stats of every enabled VF are updated.
 */
void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;
	/* Snapshot for the pause-frame check further down */
	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	/* Link-level error/fault counters */
	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
		vsi->shared->isc_pause_frames = 1;

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Malformed-frame counters */
	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* EEE */
	i40e_get_phy_lpi_status(hw, nsd);

	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
			  &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* Subsequent calls compute deltas against the values just read */
	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}
1922 
1923 /**
1924  * Update VSI-specific ethernet statistics counters.
1925  **/
1926 void
1927 ixl_update_eth_stats(struct ixl_vsi *vsi)
1928 {
1929 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1930 	struct i40e_hw *hw = &pf->hw;
1931 	struct i40e_eth_stats *es;
1932 	struct i40e_eth_stats *oes;
1933 	u16 stat_idx = vsi->info.stat_counter_idx;
1934 
1935 	es = &vsi->eth_stats;
1936 	oes = &vsi->eth_stats_offsets;
1937 
1938 	/* Gather up the stats that the hw collects */
1939 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
1940 			   vsi->stat_offsets_loaded,
1941 			   &oes->tx_errors, &es->tx_errors);
1942 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
1943 			   vsi->stat_offsets_loaded,
1944 			   &oes->rx_discards, &es->rx_discards);
1945 
1946 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
1947 			   I40E_GLV_GORCL(stat_idx),
1948 			   vsi->stat_offsets_loaded,
1949 			   &oes->rx_bytes, &es->rx_bytes);
1950 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
1951 			   I40E_GLV_UPRCL(stat_idx),
1952 			   vsi->stat_offsets_loaded,
1953 			   &oes->rx_unicast, &es->rx_unicast);
1954 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
1955 			   I40E_GLV_MPRCL(stat_idx),
1956 			   vsi->stat_offsets_loaded,
1957 			   &oes->rx_multicast, &es->rx_multicast);
1958 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
1959 			   I40E_GLV_BPRCL(stat_idx),
1960 			   vsi->stat_offsets_loaded,
1961 			   &oes->rx_broadcast, &es->rx_broadcast);
1962 
1963 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
1964 			   I40E_GLV_GOTCL(stat_idx),
1965 			   vsi->stat_offsets_loaded,
1966 			   &oes->tx_bytes, &es->tx_bytes);
1967 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
1968 			   I40E_GLV_UPTCL(stat_idx),
1969 			   vsi->stat_offsets_loaded,
1970 			   &oes->tx_unicast, &es->tx_unicast);
1971 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
1972 			   I40E_GLV_MPTCL(stat_idx),
1973 			   vsi->stat_offsets_loaded,
1974 			   &oes->tx_multicast, &es->tx_multicast);
1975 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
1976 			   I40E_GLV_BPTCL(stat_idx),
1977 			   vsi->stat_offsets_loaded,
1978 			   &oes->tx_broadcast, &es->tx_broadcast);
1979 	vsi->stat_offsets_loaded = true;
1980 }
1981 
1982 void
1983 ixl_update_vsi_stats(struct ixl_vsi *vsi)
1984 {
1985 	struct ixl_pf		*pf;
1986 	struct ifnet		*ifp;
1987 	struct i40e_eth_stats	*es;
1988 	u64			tx_discards;
1989 
1990 	struct i40e_hw_port_stats *nsd;
1991 
1992 	pf = vsi->back;
1993 	ifp = vsi->ifp;
1994 	es = &vsi->eth_stats;
1995 	nsd = &pf->stats;
1996 
1997 	ixl_update_eth_stats(vsi);
1998 
1999 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2000 
2001 	/* Update ifnet stats */
2002 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2003 	                   es->rx_multicast +
2004 			   es->rx_broadcast);
2005 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2006 	                   es->tx_multicast +
2007 			   es->tx_broadcast);
2008 	IXL_SET_IBYTES(vsi, es->rx_bytes);
2009 	IXL_SET_OBYTES(vsi, es->tx_bytes);
2010 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2011 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2012 
2013 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2014 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
2015 	    nsd->rx_jabber);
2016 	IXL_SET_OERRORS(vsi, es->tx_errors);
2017 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2018 	IXL_SET_OQDROPS(vsi, tx_discards);
2019 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2020 	IXL_SET_COLLISIONS(vsi, 0);
2021 }
2022 
2023 /**
2024  * Reset all of the stats for the given pf
2025  **/
2026 void
2027 ixl_pf_reset_stats(struct ixl_pf *pf)
2028 {
2029 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2030 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2031 	pf->stat_offsets_loaded = false;
2032 }
2033 
2034 /**
2035  * Resets all stats of the given vsi
2036  **/
2037 void
2038 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2039 {
2040 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2041 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2042 	vsi->stat_offsets_loaded = false;
2043 }
2044 
2045 /**
2046  * Read and update a 48 bit stat from the hw
2047  *
2048  * Since the device stats are not reset at PFReset, they likely will not
2049  * be zeroed when the driver starts.  We'll save the first values read
2050  * and use them as offsets to be subtracted from the raw values in order
2051  * to report stats that count from zero.
2052  **/
2053 void
2054 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2055 	bool offset_loaded, u64 *offset, u64 *stat)
2056 {
2057 	u64 new_data;
2058 
2059 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2060 	new_data = rd64(hw, loreg);
2061 #else
2062 	/*
2063 	 * Use two rd32's instead of one rd64; FreeBSD versions before
2064 	 * 10 don't support 64-bit bus reads/writes.
2065 	 */
2066 	new_data = rd32(hw, loreg);
2067 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2068 #endif
2069 
2070 	if (!offset_loaded)
2071 		*offset = new_data;
2072 	if (new_data >= *offset)
2073 		*stat = new_data - *offset;
2074 	else
2075 		*stat = (new_data + ((u64)1 << 48)) - *offset;
2076 	*stat &= 0xFFFFFFFFFFFFULL;
2077 }
2078 
2079 /**
2080  * Read and update a 32 bit stat from the hw
2081  **/
2082 void
2083 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2084 	bool offset_loaded, u64 *offset, u64 *stat)
2085 {
2086 	u32 new_data;
2087 
2088 	new_data = rd32(hw, reg);
2089 	if (!offset_loaded)
2090 		*offset = new_data;
2091 	if (new_data >= *offset)
2092 		*stat = (u32)(new_data - *offset);
2093 	else
2094 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2095 }
2096 
2097 /**
2098  * Add subset of device sysctls safe to use in recovery mode
2099  */
2100 void
2101 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2102 {
2103 	device_t dev = pf->dev;
2104 
2105 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2106 	struct sysctl_oid_list *ctx_list =
2107 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2108 
2109 	struct sysctl_oid *debug_node;
2110 	struct sysctl_oid_list *debug_list;
2111 
2112 	SYSCTL_ADD_PROC(ctx, ctx_list,
2113 	    OID_AUTO, "fw_version",
2114 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2115 	    ixl_sysctl_show_fw, "A", "Firmware version");
2116 
2117 	/* Add sysctls meant to print debug information, but don't list them
2118 	 * in "sysctl -a" output. */
2119 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2120 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2121 	    "Debug Sysctls");
2122 	debug_list = SYSCTL_CHILDREN(debug_node);
2123 
2124 	SYSCTL_ADD_UINT(ctx, debug_list,
2125 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2126 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2127 
2128 	SYSCTL_ADD_UINT(ctx, debug_list,
2129 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2130 	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2131 
2132 	SYSCTL_ADD_PROC(ctx, debug_list,
2133 	    OID_AUTO, "dump_debug_data",
2134 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2135 	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2136 
2137 	SYSCTL_ADD_PROC(ctx, debug_list,
2138 	    OID_AUTO, "do_pf_reset",
2139 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2140 	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2141 
2142 	SYSCTL_ADD_PROC(ctx, debug_list,
2143 	    OID_AUTO, "do_core_reset",
2144 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2145 	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2146 
2147 	SYSCTL_ADD_PROC(ctx, debug_list,
2148 	    OID_AUTO, "do_global_reset",
2149 	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2150 	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2151 
2152 	SYSCTL_ADD_PROC(ctx, debug_list,
2153 	    OID_AUTO, "queue_interrupt_table",
2154 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2155 	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2156 }
2157 
/**
 * Register the full set of per-device sysctls for normal operation.
 *
 * Adds link/flow-control/speed controls, ITR knobs, FEC controls (25G
 * parts only), EEE status counters, and a hidden "debug" subtree with
 * diagnostic dump and reset-trigger handlers.  Compare with
 * ixl_add_sysctls_recovery_mode(), which registers only the subset
 * usable while firmware is in recovery mode.
 */
void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;

	/* Attach everything under the device's own sysctl tree */
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	struct sysctl_oid *fec_node;
	struct sysctl_oid_list *fec_list;
	struct sysctl_oid *eee_node;
	struct sysctl_oid_list *eee_list;

	/* Set up sysctls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "supported_speeds",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "unallocated_queues",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_unallocated_queues, "I",
	    "Queues not allocated to a PF or VF");

	/* Interrupt throttling rate controls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_pf_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_pf_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");

	/* Add FEC sysctls for 25G adapters */
	if (i40e_is_25G_device(hw->device_id)) {
		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "FEC Sysctls");
		fec_list = SYSCTL_CHILDREN(fec_node);

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_ability",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_ability",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_requested",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_fc_request, "I",
		    "FC FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_requested",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_rs_request, "I",
		    "RS FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "auto_fec_enabled",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_auto_enable, "I",
		    "Let FW decide FEC ability/request modes");
	}

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);

	/* Energy Efficient Ethernet controls and counters */
	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Energy Efficient Ethernet (EEE) Sysctls");
	eee_list = SYSCTL_CHILDREN(eee_node);

	SYSCTL_ADD_PROC(ctx, eee_list,
	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    pf, 0, ixl_sysctl_eee_enable, "I",
	    "Enable Energy Efficient Ethernet (EEE)");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
	    "TX LPI status");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
	    "RX LPI status");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
	    "TX LPI count");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
	    "RX LPI count");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
	    "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	/* Debug mask for the shared (Intel common) code */
	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	/* Debug mask for this driver's own messages */
	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "link_status",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "hw_res_alloc",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_config",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_vlans",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_key",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_lut",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_hena",
	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "disable_fw_link_management",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "dump_debug_data",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");

	/* Manual reset triggers */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_pf_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_core_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_global_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	/* I2C access handlers, only for adapters with an I2C interface */
	if (pf->has_i2c) {
		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "write_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_diag_data",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
	}
}
2400 
2401 /*
2402  * Primarily for finding out how many queues can be assigned to VFs,
2403  * at runtime.
2404  */
2405 static int
2406 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2407 {
2408 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2409 	int queues;
2410 
2411 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2412 
2413 	return sysctl_handle_int(oidp, NULL, queues, req);
2414 }
2415 
2416 static const char *
2417 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2418 {
2419 	const char * link_speed_str[] = {
2420 		"Unknown",
2421 		"100 Mbps",
2422 		"1 Gbps",
2423 		"10 Gbps",
2424 		"40 Gbps",
2425 		"20 Gbps",
2426 		"25 Gbps",
2427 		"2.5 Gbps",
2428 		"5 Gbps"
2429 	};
2430 	int index;
2431 
2432 	switch (link_speed) {
2433 	case I40E_LINK_SPEED_100MB:
2434 		index = 1;
2435 		break;
2436 	case I40E_LINK_SPEED_1GB:
2437 		index = 2;
2438 		break;
2439 	case I40E_LINK_SPEED_10GB:
2440 		index = 3;
2441 		break;
2442 	case I40E_LINK_SPEED_40GB:
2443 		index = 4;
2444 		break;
2445 	case I40E_LINK_SPEED_20GB:
2446 		index = 5;
2447 		break;
2448 	case I40E_LINK_SPEED_25GB:
2449 		index = 6;
2450 		break;
2451 	case I40E_LINK_SPEED_2_5GB:
2452 		index = 7;
2453 		break;
2454 	case I40E_LINK_SPEED_5GB:
2455 		index = 8;
2456 		break;
2457 	case I40E_LINK_SPEED_UNKNOWN:
2458 	default:
2459 		index = 0;
2460 		break;
2461 	}
2462 
2463 	return (link_speed_str[index]);
2464 }
2465 
2466 int
2467 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2468 {
2469 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2470 	struct i40e_hw *hw = &pf->hw;
2471 	int error = 0;
2472 
2473 	ixl_update_link_status(pf);
2474 
2475 	error = sysctl_handle_string(oidp,
2476 	    __DECONST(void *,
2477 		ixl_link_speed_string(hw->phy.link_info.link_speed)),
2478 	    8, req);
2479 
2480 	return (error);
2481 }
2482 
2483 /*
2484  * Converts 8-bit speeds value to and from sysctl flags and
2485  * Admin Queue flags.
2486  */
2487 static u8
2488 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2489 {
2490 #define SPEED_MAP_SIZE 8
2491 	static u16 speedmap[SPEED_MAP_SIZE] = {
2492 		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2493 		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2494 		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2495 		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2496 		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2497 		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2498 		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2499 		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2500 	};
2501 	u8 retval = 0;
2502 
2503 	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2504 		if (to_aq)
2505 			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2506 		else
2507 			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2508 	}
2509 
2510 	return (retval);
2511 }
2512 
2513 int
2514 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2515 {
2516 	struct i40e_hw *hw = &pf->hw;
2517 	device_t dev = pf->dev;
2518 	struct i40e_aq_get_phy_abilities_resp abilities;
2519 	struct i40e_aq_set_phy_config config;
2520 	enum i40e_status_code aq_error = 0;
2521 
2522 	/* Get current capability information */
2523 	aq_error = i40e_aq_get_phy_capabilities(hw,
2524 	    FALSE, FALSE, &abilities, NULL);
2525 	if (aq_error) {
2526 		device_printf(dev,
2527 		    "%s: Error getting phy capabilities %d,"
2528 		    " aq error: %d\n", __func__, aq_error,
2529 		    hw->aq.asq_last_status);
2530 		return (EIO);
2531 	}
2532 
2533 	/* Prepare new config */
2534 	bzero(&config, sizeof(config));
2535 	if (from_aq)
2536 		config.link_speed = speeds;
2537 	else
2538 		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2539 	config.phy_type = abilities.phy_type;
2540 	config.phy_type_ext = abilities.phy_type_ext;
2541 	config.abilities = abilities.abilities
2542 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2543 	config.eee_capability = abilities.eee_capability;
2544 	config.eeer = abilities.eeer_val;
2545 	config.low_power_ctrl = abilities.d3_lpan;
2546 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2547 	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2548 
2549 	/* Do aq command & restart link */
2550 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2551 	if (aq_error) {
2552 		device_printf(dev,
2553 		    "%s: Error setting new phy config %d,"
2554 		    " aq error: %d\n", __func__, aq_error,
2555 		    hw->aq.asq_last_status);
2556 		return (EIO);
2557 	}
2558 
2559 	return (0);
2560 }
2561 
2562 /*
2563 ** Supported link speeds
2564 **	Flags:
2565 **	 0x1 - 100 Mb
2566 **	 0x2 - 1G
2567 **	 0x4 - 10G
2568 **	 0x8 - 20G
2569 **	0x10 - 25G
2570 **	0x20 - 40G
2571 **	0x40 - 2.5G
2572 **	0x80 - 5G
2573 */
2574 static int
2575 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2576 {
2577 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2578 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2579 
2580 	return sysctl_handle_int(oidp, NULL, supported, req);
2581 }
2582 
2583 /*
2584 ** Control link advertise speed:
2585 **	Flags:
2586 **	 0x1 - advertise 100 Mb
2587 **	 0x2 - advertise 1G
2588 **	 0x4 - advertise 10G
2589 **	 0x8 - advertise 20G
2590 **	0x10 - advertise 25G
2591 **	0x20 - advertise 40G
2592 **	0x40 - advertise 2.5G
2593 **	0x80 - advertise 5G
2594 **
2595 **	Set to 0 to disable link
2596 */
2597 int
2598 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2599 {
2600 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2601 	device_t dev = pf->dev;
2602 	u8 converted_speeds;
2603 	int requested_ls = 0;
2604 	int error = 0;
2605 
2606 	/* Read in new mode */
2607 	requested_ls = pf->advertised_speed;
2608 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2609 	if ((error) || (req->newptr == NULL))
2610 		return (error);
2611 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2612 		device_printf(dev, "Interface is currently in FW recovery mode. "
2613 				"Setting advertise speed not supported\n");
2614 		return (EINVAL);
2615 	}
2616 
2617 	/* Error out if bits outside of possible flag range are set */
2618 	if ((requested_ls & ~((u8)0xFF)) != 0) {
2619 		device_printf(dev, "Input advertised speed out of range; "
2620 		    "valid flags are: 0x%02x\n",
2621 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2622 		return (EINVAL);
2623 	}
2624 
2625 	/* Check if adapter supports input value */
2626 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2627 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2628 		device_printf(dev, "Invalid advertised speed; "
2629 		    "valid flags are: 0x%02x\n",
2630 		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2631 		return (EINVAL);
2632 	}
2633 
2634 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
2635 	if (error)
2636 		return (error);
2637 
2638 	pf->advertised_speed = requested_ls;
2639 	ixl_update_link_status(pf);
2640 	return (0);
2641 }
2642 
2643 /*
2644  * Input: bitmap of enum i40e_aq_link_speed
2645  */
2646 u64
2647 ixl_max_aq_speed_to_value(u8 link_speeds)
2648 {
2649 	if (link_speeds & I40E_LINK_SPEED_40GB)
2650 		return IF_Gbps(40);
2651 	if (link_speeds & I40E_LINK_SPEED_25GB)
2652 		return IF_Gbps(25);
2653 	if (link_speeds & I40E_LINK_SPEED_20GB)
2654 		return IF_Gbps(20);
2655 	if (link_speeds & I40E_LINK_SPEED_10GB)
2656 		return IF_Gbps(10);
2657 	if (link_speeds & I40E_LINK_SPEED_5GB)
2658 		return IF_Gbps(5);
2659 	if (link_speeds & I40E_LINK_SPEED_2_5GB)
2660 		return IF_Mbps(2500);
2661 	if (link_speeds & I40E_LINK_SPEED_1GB)
2662 		return IF_Gbps(1);
2663 	if (link_speeds & I40E_LINK_SPEED_100MB)
2664 		return IF_Mbps(100);
2665 	else
2666 		/* Minimum supported link speed */
2667 		return IF_Mbps(100);
2668 }
2669 
2670 /*
2671 ** Get the width and transaction speed of
2672 ** the bus this adapter is plugged into.
2673 */
2674 void
2675 ixl_get_bus_info(struct ixl_pf *pf)
2676 {
2677 	struct i40e_hw *hw = &pf->hw;
2678 	device_t dev = pf->dev;
2679         u16 link;
2680         u32 offset, num_ports;
2681 	u64 max_speed;
2682 
2683 	/* Some devices don't use PCIE */
2684 	if (hw->mac.type == I40E_MAC_X722)
2685 		return;
2686 
2687         /* Read PCI Express Capabilities Link Status Register */
2688         pci_find_cap(dev, PCIY_EXPRESS, &offset);
2689         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2690 
2691 	/* Fill out hw struct with PCIE info */
2692 	i40e_set_pci_config_data(hw, link);
2693 
2694 	/* Use info to print out bandwidth messages */
2695         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2696             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2697             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2698             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2699             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2700             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2701             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2702             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2703             ("Unknown"));
2704 
2705 	/*
2706 	 * If adapter is in slot with maximum supported speed,
2707 	 * no warning message needs to be printed out.
2708 	 */
2709 	if (hw->bus.speed >= i40e_bus_speed_8000
2710 	    && hw->bus.width >= i40e_bus_width_pcie_x8)
2711 		return;
2712 
2713 	num_ports = bitcount32(hw->func_caps.valid_functions);
2714 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2715 
2716 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2717                 device_printf(dev, "PCI-Express bandwidth available"
2718                     " for this device may be insufficient for"
2719                     " optimal performance.\n");
2720                 device_printf(dev, "Please move the device to a different"
2721 		    " PCI-e link with more lanes and/or higher"
2722 		    " transfer rate.\n");
2723         }
2724 }
2725 
2726 static int
2727 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2728 {
2729 	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
2730 	struct i40e_hw	*hw = &pf->hw;
2731 	struct sbuf	*sbuf;
2732 
2733 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2734 	ixl_nvm_version_str(hw, sbuf);
2735 	sbuf_finish(sbuf);
2736 	sbuf_delete(sbuf);
2737 
2738 	return (0);
2739 }
2740 
2741 void
2742 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2743 {
2744 	u8 nvma_ptr = nvma->config & 0xFF;
2745 	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2746 	const char * cmd_str;
2747 
2748 	switch (nvma->command) {
2749 	case I40E_NVM_READ:
2750 		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2751 		    nvma->offset == 0 && nvma->data_size == 1) {
2752 			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2753 			return;
2754 		}
2755 		cmd_str = "READ ";
2756 		break;
2757 	case I40E_NVM_WRITE:
2758 		cmd_str = "WRITE";
2759 		break;
2760 	default:
2761 		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2762 		return;
2763 	}
2764 	device_printf(dev,
2765 	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2766 	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2767 }
2768 
/*
 * Handle an NVM update ioctl (SIOCGDRVSPEC/SIOCSDRVSPEC) from the
 * NVM update tool.
 *
 * Copies the i40e_nvm_access request from userspace, waits out any
 * in-progress adapter reset, forwards the request to the shared-code
 * NVM update state machine, and copies the result back.  Returns 0 on
 * success, a kernel errno on copy/validation failure, or the (negative)
 * perrno produced by the shared code.
 */
int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_nvm_access *nvma;
	device_t dev = pf->dev;
	enum i40e_status_code status = 0;
	size_t nvma_size, ifd_len, exp_len;
	int err, perrno;

	DEBUGFUNC("ixl_handle_nvmupd_cmd");

	/* Sanity checks */
	nvma_size = sizeof(struct i40e_nvm_access);
	ifd_len = ifd->ifd_len;

	/* The request must at least hold a full i40e_nvm_access header */
	if (ifd_len < nvma_size ||
	    ifd->ifd_data == NULL) {
		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
		    __func__);
		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
		    __func__, ifd_len, nvma_size);
		device_printf(dev, "%s: data pointer: %p\n", __func__,
		    ifd->ifd_data);
		return (EINVAL);
	}

	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
	err = copyin(ifd->ifd_data, nvma, ifd_len);
	if (err) {
		device_printf(dev, "%s: Cannot get request from user space\n",
		    __func__);
		free(nvma, M_IXL);
		return (err);
	}

	if (pf->dbg_mask & IXL_DBG_NVMUPD)
		ixl_print_nvm_cmd(dev, nvma);

	/* If a reset is in progress, poll for up to ~10s for it to finish */
	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
		int count = 0;
		while (count++ < 100) {
			i40e_msec_delay(100);
			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
				break;
		}
	}

	/* Still resetting after the wait: refuse the command */
	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
		device_printf(dev,
		    "%s: timeout waiting for EMP reset to finish\n",
		    __func__);
		free(nvma, M_IXL);
		return (-EBUSY);
	}

	/* Bound the payload size before trusting it below */
	if (nvma->data_size < 1 || nvma->data_size > 4096) {
		device_printf(dev,
		    "%s: invalid request, data size not in supported range\n",
		    __func__);
		free(nvma, M_IXL);
		return (EINVAL);
	}

	/*
	 * Older versions of the NVM update tool don't set ifd_len to the size
	 * of the entire buffer passed to the ioctl. Check the data_size field
	 * in the contained i40e_nvm_access struct and ensure everything is
	 * copied in from userspace.
	 */
	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */

	/* Grow the buffer and re-copy to pick up the full payload */
	if (ifd_len < exp_len) {
		ifd_len = exp_len;
		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
		err = copyin(ifd->ifd_data, nvma, ifd_len);
		if (err) {
			device_printf(dev, "%s: Cannot get request from user space\n",
					__func__);
			free(nvma, M_IXL);
			return (err);
		}
	}

	// TODO: Might need a different lock here
	// IXL_PF_LOCK(pf);
	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
	// IXL_PF_UNLOCK(pf);

	err = copyout(nvma, ifd->ifd_data, ifd_len);
	free(nvma, M_IXL);
	if (err) {
		device_printf(dev, "%s: Cannot return data to user space\n",
				__func__);
		return (err);
	}

	/* Let the nvmupdate report errors, show them only when debug is enabled */
	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
		    i40e_stat_str(hw, status), perrno);

	/*
	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
	 * to run this ioctl again. So use -EACCES for -EPERM instead.
	 */
	if (perrno == -EPERM)
		return (-EACCES);
	else
		return (perrno);
}
2880 
2881 int
2882 ixl_find_i2c_interface(struct ixl_pf *pf)
2883 {
2884 	struct i40e_hw *hw = &pf->hw;
2885 	bool i2c_en, port_matched;
2886 	u32 reg;
2887 
2888 	for (int i = 0; i < 4; i++) {
2889 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
2890 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
2891 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
2892 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
2893 		    & BIT(hw->port);
2894 		if (i2c_en && port_matched)
2895 			return (i);
2896 	}
2897 
2898 	return (-1);
2899 }
2900 
2901 static char *
2902 ixl_phy_type_string(u32 bit_pos, bool ext)
2903 {
2904 	static char * phy_types_str[32] = {
2905 		"SGMII",
2906 		"1000BASE-KX",
2907 		"10GBASE-KX4",
2908 		"10GBASE-KR",
2909 		"40GBASE-KR4",
2910 		"XAUI",
2911 		"XFI",
2912 		"SFI",
2913 		"XLAUI",
2914 		"XLPPI",
2915 		"40GBASE-CR4",
2916 		"10GBASE-CR1",
2917 		"SFP+ Active DA",
2918 		"QSFP+ Active DA",
2919 		"Reserved (14)",
2920 		"Reserved (15)",
2921 		"Reserved (16)",
2922 		"100BASE-TX",
2923 		"1000BASE-T",
2924 		"10GBASE-T",
2925 		"10GBASE-SR",
2926 		"10GBASE-LR",
2927 		"10GBASE-SFP+Cu",
2928 		"10GBASE-CR1",
2929 		"40GBASE-CR4",
2930 		"40GBASE-SR4",
2931 		"40GBASE-LR4",
2932 		"1000BASE-SX",
2933 		"1000BASE-LX",
2934 		"1000BASE-T Optical",
2935 		"20GBASE-KR2",
2936 		"Reserved (31)"
2937 	};
2938 	static char * ext_phy_types_str[8] = {
2939 		"25GBASE-KR",
2940 		"25GBASE-CR",
2941 		"25GBASE-SR",
2942 		"25GBASE-LR",
2943 		"25GBASE-AOC",
2944 		"25GBASE-ACC",
2945 		"2.5GBASE-T",
2946 		"5GBASE-T"
2947 	};
2948 
2949 	if (ext && bit_pos > 7) return "Invalid_Ext";
2950 	if (bit_pos > 31) return "Invalid";
2951 
2952 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
2953 }
2954 
/* TODO: ERJ: I don't think this is necessary anymore. */
/*
 * Issue a Get Link Status admin queue command and copy the response
 * into the caller-supplied link_status structure.
 *
 * Returns 0 on success or EIO if the admin queue command fails.
 */
int
ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_desc desc;
	enum i40e_status_code status;

	/* The command/response payload lives directly in the descriptor. */
	struct i40e_aqc_get_link_status *aq_link_status =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
	/*
	 * NOTE(review): this writes LSE_ENABLE into the caller's output
	 * buffer rather than into the descriptor (aq_link_status) that is
	 * actually sent, and the value is overwritten by the bcopy below.
	 * It looks like aq_link_status->command_flags was intended --
	 * confirm before changing, since it alters what firmware receives.
	 */
	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	/* Hand the response portion of the descriptor back to the caller. */
	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
	return (0);
}
2981 
2982 static char *
2983 ixl_phy_type_string_ls(u8 val)
2984 {
2985 	if (val >= 0x1F)
2986 		return ixl_phy_type_string(val - 0x1F, true);
2987 	else
2988 		return ixl_phy_type_string(val, false);
2989 }
2990 
2991 static int
2992 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
2993 {
2994 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2995 	device_t dev = pf->dev;
2996 	struct sbuf *buf;
2997 	int error = 0;
2998 
2999 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3000 	if (!buf) {
3001 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3002 		return (ENOMEM);
3003 	}
3004 
3005 	struct i40e_aqc_get_link_status link_status;
3006 	error = ixl_aq_get_link_status(pf, &link_status);
3007 	if (error) {
3008 		sbuf_delete(buf);
3009 		return (error);
3010 	}
3011 
3012 	sbuf_printf(buf, "\n"
3013 	    "PHY Type : 0x%02x<%s>\n"
3014 	    "Speed    : 0x%02x\n"
3015 	    "Link info: 0x%02x\n"
3016 	    "AN info  : 0x%02x\n"
3017 	    "Ext info : 0x%02x\n"
3018 	    "Loopback : 0x%02x\n"
3019 	    "Max Frame: %d\n"
3020 	    "Config   : 0x%02x\n"
3021 	    "Power    : 0x%02x",
3022 	    link_status.phy_type,
3023 	    ixl_phy_type_string_ls(link_status.phy_type),
3024 	    link_status.link_speed,
3025 	    link_status.link_info,
3026 	    link_status.an_info,
3027 	    link_status.ext_info,
3028 	    link_status.loopback,
3029 	    link_status.max_frame_size,
3030 	    link_status.config,
3031 	    link_status.power_desc);
3032 
3033 	error = sbuf_finish(buf);
3034 	if (error)
3035 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3036 
3037 	sbuf_delete(buf);
3038 	return (error);
3039 }
3040 
/*
 * Sysctl handler: query the PHY capabilities from firmware and print
 * them, decoding the PHY type, extended PHY type and link speed
 * bitmasks into symbolic names.
 *
 * Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or EIO
 * if the admin queue command fails.
 */
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}

	sbuf_printf(buf, "\n"
	    "PHY Type : %08x",
	    abilities.phy_type);

	/* Decode each set bit of the 32-bit PHY type mask into a name. */
	if (abilities.phy_type != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 32; i++)
			if ((1 << i) & abilities.phy_type)
				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
		sbuf_printf(buf, ">");
	}

	sbuf_printf(buf, "\nPHY Ext  : %02x",
	    abilities.phy_type_ext);

	/* Decode the (4-bit) extended PHY type mask. */
	if (abilities.phy_type_ext != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 4; i++)
			if ((1 << i) & abilities.phy_type_ext)
				sbuf_printf(buf, "%s,",
				    ixl_phy_type_string(i, true));
		sbuf_printf(buf, ">");
	}

	/* Decode each set bit of the link speed mask into a speed name. */
	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
	if (abilities.link_speed != 0) {
		u8 link_speed;
		sbuf_printf(buf, " <");
		for (int i = 0; i < 8; i++) {
			link_speed = (1 << i) & abilities.link_speed;
			if (link_speed)
				sbuf_printf(buf, "%s, ",
				    ixl_link_speed_string(link_speed));
		}
		sbuf_printf(buf, ">");
	}

	/* Remaining fields are dumped raw in hex. */
	sbuf_printf(buf, "\n"
	    "Abilities: %02x\n"
	    "EEE cap  : %04x\n"
	    "EEER reg : %08x\n"
	    "D3 Lpan  : %02x\n"
	    "ID       : %02x %02x %02x %02x\n"
	    "ModType  : %02x %02x %02x\n"
	    "ModType E: %01x\n"
	    "FEC Cfg  : %02x\n"
	    "Ext CC   : %02x",
	    abilities.abilities, abilities.eee_capability,
	    abilities.eeer_val, abilities.d3_lpan,
	    abilities.phy_id[0], abilities.phy_id[1],
	    abilities.phy_id[2], abilities.phy_id[3],
	    abilities.module_type[0], abilities.module_type[1],
	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
	    abilities.ext_comp_code);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}
3132 
/*
 * Sysctl handler: list the PF's MAC/VLAN filters and, when SR-IOV is
 * compiled in, the filters of every enabled VF.
 *
 * Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or the
 * error from sbuf_finish().
 */
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	device_t dev = pf->dev;
	int error = 0, ftl_len = 0, ftl_counter = 0;

	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "PF Filters:\n");
	/* First pass: count entries so the last one can omit its newline. */
	SLIST_FOREACH(f, &vsi->ftl, next)
		ftl_len++;

	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		SLIST_FOREACH(f, &vsi->ftl, next) {
			sbuf_printf(buf,
			    MAC_FORMAT ", vlan %4d, flags %#06x",
			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

#ifdef PCI_IOV
	/* TODO: Give each VF its own filter list sysctl */
	struct ixl_vf *vf;
	if (pf->num_vfs > 0) {
		sbuf_printf(buf, "\n\n");
		for (int i = 0; i < pf->num_vfs; i++) {
			vf = &pf->vfs[i];
			if (!(vf->vf_flags & VF_FLAG_ENABLED))
				continue;

			/* Re-point vsi at this VF's VSI for the loop below. */
			vsi = &vf->vsi;
			ftl_len = 0, ftl_counter = 0;
			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
			SLIST_FOREACH(f, &vsi->ftl, next)
				ftl_len++;

			if (ftl_len < 1)
				sbuf_printf(buf, "(none)\n");
			else {
				SLIST_FOREACH(f, &vsi->ftl, next) {
					sbuf_printf(buf,
					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
				}
			}
		}
	}
#endif

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3206 
3207 #define IXL_SW_RES_SIZE 0x14
3208 int
3209 ixl_res_alloc_cmp(const void *a, const void *b)
3210 {
3211 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3212 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3213 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3214 
3215 	return ((int)one->resource_type - (int)two->resource_type);
3216 }
3217 
3218 /*
3219  * Longest string length: 25
3220  */
3221 const char *
3222 ixl_switch_res_type_string(u8 type)
3223 {
3224 	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3225 		"VEB",
3226 		"VSI",
3227 		"Perfect Match MAC address",
3228 		"S-tag",
3229 		"(Reserved)",
3230 		"Multicast hash entry",
3231 		"Unicast hash entry",
3232 		"VLAN",
3233 		"VSI List entry",
3234 		"(Reserved)",
3235 		"VLAN Statistic Pool",
3236 		"Mirror Rule",
3237 		"Queue Set",
3238 		"Inner VLAN Forward filter",
3239 		"(Reserved)",
3240 		"Inner MAC",
3241 		"IP",
3242 		"GRE/VN1 Key",
3243 		"VN2 Key",
3244 		"Tunneling Port"
3245 	};
3246 
3247 	if (type < IXL_SW_RES_SIZE)
3248 		return ixl_switch_res_type_strings[type];
3249 	else
3250 		return "(Reserved)";
3251 }
3252 
3253 static int
3254 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3255 {
3256 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3257 	struct i40e_hw *hw = &pf->hw;
3258 	device_t dev = pf->dev;
3259 	struct sbuf *buf;
3260 	enum i40e_status_code status;
3261 	int error = 0;
3262 
3263 	u8 num_entries;
3264 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3265 
3266 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3267 	if (!buf) {
3268 		device_printf(dev, "Could not allocate sbuf for output.\n");
3269 		return (ENOMEM);
3270 	}
3271 
3272 	bzero(resp, sizeof(resp));
3273 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3274 				resp,
3275 				IXL_SW_RES_SIZE,
3276 				NULL);
3277 	if (status) {
3278 		device_printf(dev,
3279 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3280 		    __func__, i40e_stat_str(hw, status),
3281 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3282 		sbuf_delete(buf);
3283 		return (error);
3284 	}
3285 
3286 	/* Sort entries by type for display */
3287 	qsort(resp, num_entries,
3288 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3289 	    &ixl_res_alloc_cmp);
3290 
3291 	sbuf_cat(buf, "\n");
3292 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3293 	sbuf_printf(buf,
3294 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3295 	    "                          | (this)     | (all) | (this) | (all)       \n");
3296 	for (int i = 0; i < num_entries; i++) {
3297 		sbuf_printf(buf,
3298 		    "%25s | %10d   %5d   %6d   %12d",
3299 		    ixl_switch_res_type_string(resp[i].resource_type),
3300 		    resp[i].guaranteed,
3301 		    resp[i].total,
3302 		    resp[i].used,
3303 		    resp[i].total_unalloced);
3304 		if (i < num_entries - 1)
3305 			sbuf_cat(buf, "\n");
3306 	}
3307 
3308 	error = sbuf_finish(buf);
3309 	if (error)
3310 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3311 
3312 	sbuf_delete(buf);
3313 	return (error);
3314 }
3315 
/*
 * SEID ranges assigned by firmware at power-on to the fixed switch
 * elements (EMP, MAC ports, PFs and VFs).  An element whose SEID falls
 * in one of these ranges can be identified from the SEID alone.
 */
enum ixl_sw_seid_offset {
	IXL_SW_SEID_EMP = 1,
	IXL_SW_SEID_MAC_START = 2,
	IXL_SW_SEID_MAC_END = 5,
	IXL_SW_SEID_PF_START = 16,
	IXL_SW_SEID_PF_END = 31,
	IXL_SW_SEID_VF_START = 32,
	IXL_SW_SEID_VF_END = 159,
};
3325 
3326 /*
3327  * Caller must init and delete sbuf; this function will clear and
3328  * finish it for caller.
3329  *
3330  * Note: The SEID argument only applies for elements defined by FW at
3331  * power-on; these include the EMP, Ports, PFs and VFs.
3332  */
3333 static char *
3334 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3335 {
3336 	sbuf_clear(s);
3337 
3338 	/* If SEID is in certain ranges, then we can infer the
3339 	 * mapping of SEID to switch element.
3340 	 */
3341 	if (seid == IXL_SW_SEID_EMP) {
3342 		sbuf_cat(s, "EMP");
3343 		goto out;
3344 	} else if (seid >= IXL_SW_SEID_MAC_START &&
3345 	    seid <= IXL_SW_SEID_MAC_END) {
3346 		sbuf_printf(s, "MAC  %2d",
3347 		    seid - IXL_SW_SEID_MAC_START);
3348 		goto out;
3349 	} else if (seid >= IXL_SW_SEID_PF_START &&
3350 	    seid <= IXL_SW_SEID_PF_END) {
3351 		sbuf_printf(s, "PF  %3d",
3352 		    seid - IXL_SW_SEID_PF_START);
3353 		goto out;
3354 	} else if (seid >= IXL_SW_SEID_VF_START &&
3355 	    seid <= IXL_SW_SEID_VF_END) {
3356 		sbuf_printf(s, "VF  %3d",
3357 		    seid - IXL_SW_SEID_VF_START);
3358 		goto out;
3359 	}
3360 
3361 	switch (element_type) {
3362 	case I40E_AQ_SW_ELEM_TYPE_BMC:
3363 		sbuf_cat(s, "BMC");
3364 		break;
3365 	case I40E_AQ_SW_ELEM_TYPE_PV:
3366 		sbuf_cat(s, "PV");
3367 		break;
3368 	case I40E_AQ_SW_ELEM_TYPE_VEB:
3369 		sbuf_cat(s, "VEB");
3370 		break;
3371 	case I40E_AQ_SW_ELEM_TYPE_PA:
3372 		sbuf_cat(s, "PA");
3373 		break;
3374 	case I40E_AQ_SW_ELEM_TYPE_VSI:
3375 		sbuf_printf(s, "VSI");
3376 		break;
3377 	default:
3378 		sbuf_cat(s, "?");
3379 		break;
3380 	}
3381 
3382 out:
3383 	sbuf_finish(s);
3384 	return sbuf_data(s);
3385 }
3386 
3387 static int
3388 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3389 {
3390 	const struct i40e_aqc_switch_config_element_resp *one, *two;
3391 	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3392 	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3393 
3394 	return ((int)one->seid - (int)two->seid);
3395 }
3396 
/*
 * Sysctl handler: fetch the switch configuration from firmware and
 * print one row per element (SEID, uplink, downlink, connection type),
 * sorted by SEID, with inferred element names.
 *
 * Returns 0 on success, ENOMEM if an sbuf cannot be allocated, the AQ
 * status code on command failure, or the error from sbuf_finish().
 */
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	enum i40e_status_code status;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	/* The response is laid out over the raw AQ buffer. */
	struct i40e_aqc_switch_config_element_resp *elem;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return error;
	}
	/* A nonzero 'next' means more elements remain unfetched. */
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	/* Scratch sbuf used by ixl_switch_element_string() per element. */
	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	/* Sort entries by SEID for display */
	qsort(sw_config->element, sw_config->header.num_reported,
	    sizeof(struct i40e_aqc_switch_config_element_resp),
	    &ixl_sw_cfg_elem_seid_cmp);

	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Exclude:
	 * Revision -- all elements are revision 1 for now
	 */
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
	    "                |                 |                 | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		elem = &sw_config->element[i];

		// "%4d (%8s) | %8s   %8s   %#8x",
		sbuf_printf(buf, "%4d", elem->seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    elem->element_type, elem->seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->uplink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->uplink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->downlink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->downlink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8d", elem->connection_type);
		/* No trailing newline after the final row. */
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}
3489 
3490 static int
3491 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3492 {
3493 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3494 	struct i40e_hw *hw = &pf->hw;
3495 	device_t dev = pf->dev;
3496 	int requested_vlan = -1;
3497 	enum i40e_status_code status = 0;
3498 	int error = 0;
3499 
3500 	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3501 	if ((error) || (req->newptr == NULL))
3502 	    return (error);
3503 
3504 	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3505 		device_printf(dev, "Flags disallow setting of vlans\n");
3506 		return (ENODEV);
3507 	}
3508 
3509 	hw->switch_tag = requested_vlan;
3510 	device_printf(dev,
3511 	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3512 	    hw->switch_tag, hw->first_tag, hw->second_tag);
3513 	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3514 	if (status) {
3515 		device_printf(dev,
3516 		    "%s: aq_set_switch_config() error %s, aq error %s\n",
3517 		    __func__, i40e_stat_str(hw, status),
3518 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3519 		return (status);
3520 	}
3521 	return (0);
3522 }
3523 
/*
 * Sysctl handler: dump the RSS hash key.  On X722 the key is fetched
 * with an admin queue command; on other MACs it is read directly from
 * the PFQF_HKEY registers.
 *
 * Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or the
 * error from sbuf_finish().
 */
static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u32 reg;

	struct i40e_aqc_get_set_rss_key_data key_data;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(&key_data, sizeof(key_data));

	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		/* X722 exposes the key via the admin queue. */
		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
		if (status)
			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		/* Other MACs: read the key 4 bytes at a time from registers. */
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
		}
	}

	/* Hex-dump the key with an ASCII column. */
	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3567 
3568 static void
3569 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3570 {
3571 	int i, j, k, width;
3572 	char c;
3573 
3574 	if (length < 1 || buf == NULL) return;
3575 
3576 	int byte_stride = 16;
3577 	int lines = length / byte_stride;
3578 	int rem = length % byte_stride;
3579 	if (rem > 0)
3580 		lines++;
3581 
3582 	for (i = 0; i < lines; i++) {
3583 		width = (rem > 0 && i == lines - 1)
3584 		    ? rem : byte_stride;
3585 
3586 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3587 
3588 		for (j = 0; j < width; j++)
3589 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3590 
3591 		if (width < byte_stride) {
3592 			for (k = 0; k < (byte_stride - width); k++)
3593 				sbuf_printf(sb, "   ");
3594 		}
3595 
3596 		if (!text) {
3597 			sbuf_printf(sb, "\n");
3598 			continue;
3599 		}
3600 
3601 		for (j = 0; j < width; j++) {
3602 			c = (char)buf[i * byte_stride + j];
3603 			if (c < 32 || c > 126)
3604 				sbuf_printf(sb, ".");
3605 			else
3606 				sbuf_printf(sb, "%c", c);
3607 
3608 			if (j == width - 1)
3609 				sbuf_printf(sb, "\n");
3610 		}
3611 	}
3612 }
3613 
/*
 * Sysctl handler: dump the RSS lookup table (LUT).  On X722 the table
 * is fetched with an admin queue command; on other MACs it is read
 * directly from the PFQF_HLUT registers.
 *
 * Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or the
 * error from sbuf_finish().
 */
static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u8 hlut[512];
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(hlut, sizeof(hlut));
	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		/* X722 exposes the LUT via the admin queue (PF-wide table). */
		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
		if (status)
			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		/* Other MACs: read the LUT 4 bytes per register. */
		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
			reg = rd32(hw, I40E_PFQF_HLUT(i));
			bcopy(&reg, &hlut[i << 2], 4);
		}
	}
	/* Hex-dump the full 512-byte buffer without an ASCII column. */
	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3654 
/*
 * Sysctl handler: report the PF's 64-bit hash-enable (HENA) bitmask,
 * assembled from the two 32-bit I40E_PFQF_HENA registers.
 *
 * NOTE(review): the value is exported with sysctl_handle_long(); on a
 * 32-bit platform a long cannot hold the full 64-bit mask -- confirm
 * whether sysctl_handle_64() (with a matching OID type) should be used.
 */
static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	u64 hena;

	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);

	return sysctl_handle_long(oidp, NULL, hena, req);
}
3667 
3668 /*
3669  * Sysctl to disable firmware's link management
3670  *
3671  * 1 - Disable link management on this port
3672  * 0 - Re-enable link management
3673  *
3674  * On normal NVMs, firmware manages link by default.
3675  */
3676 static int
3677 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3678 {
3679 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3680 	struct i40e_hw *hw = &pf->hw;
3681 	device_t dev = pf->dev;
3682 	int requested_mode = -1;
3683 	enum i40e_status_code status = 0;
3684 	int error = 0;
3685 
3686 	/* Read in new mode */
3687 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
3688 	if ((error) || (req->newptr == NULL))
3689 		return (error);
3690 	/* Check for sane value */
3691 	if (requested_mode < 0 || requested_mode > 1) {
3692 		device_printf(dev, "Valid modes are 0 or 1\n");
3693 		return (EINVAL);
3694 	}
3695 
3696 	/* Set new mode */
3697 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
3698 	if (status) {
3699 		device_printf(dev,
3700 		    "%s: Error setting new phy debug mode %s,"
3701 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
3702 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3703 		return (EIO);
3704 	}
3705 
3706 	return (0);
3707 }
3708 
3709 /*
3710  * Read some diagnostic data from a (Q)SFP+ module
3711  *
3712  *             SFP A2   QSFP Lower Page
3713  * Temperature 96-97	22-23
3714  * Vcc         98-99    26-27
3715  * TX power    102-103  34-35..40-41
3716  * RX power    104-105  50-51..56-57
3717  */
3718 static int
3719 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
3720 {
3721 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3722 	device_t dev = pf->dev;
3723 	struct sbuf *sbuf;
3724 	int error = 0;
3725 	u8 output;
3726 
3727 	if (req->oldptr == NULL) {
3728 		error = SYSCTL_OUT(req, 0, 128);
3729 		return (0);
3730 	}
3731 
3732 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
3733 	if (error) {
3734 		device_printf(dev, "Error reading from i2c\n");
3735 		return (error);
3736 	}
3737 
3738 	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
3739 	if (output == 0x3) {
3740 		/*
3741 		 * Check for:
3742 		 * - Internally calibrated data
3743 		 * - Diagnostic monitoring is implemented
3744 		 */
3745 		pf->read_i2c_byte(pf, 92, 0xA0, &output);
3746 		if (!(output & 0x60)) {
3747 			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
3748 			return (0);
3749 		}
3750 
3751 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3752 
3753 		for (u8 offset = 96; offset < 100; offset++) {
3754 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
3755 			sbuf_printf(sbuf, "%02X ", output);
3756 		}
3757 		for (u8 offset = 102; offset < 106; offset++) {
3758 			pf->read_i2c_byte(pf, offset, 0xA2, &output);
3759 			sbuf_printf(sbuf, "%02X ", output);
3760 		}
3761 	} else if (output == 0xD || output == 0x11) {
3762 		/*
3763 		 * QSFP+ modules are always internally calibrated, and must indicate
3764 		 * what types of diagnostic monitoring are implemented
3765 		 */
3766 		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3767 
3768 		for (u8 offset = 22; offset < 24; offset++) {
3769 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3770 			sbuf_printf(sbuf, "%02X ", output);
3771 		}
3772 		for (u8 offset = 26; offset < 28; offset++) {
3773 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3774 			sbuf_printf(sbuf, "%02X ", output);
3775 		}
3776 		/* Read the data from the first lane */
3777 		for (u8 offset = 34; offset < 36; offset++) {
3778 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3779 			sbuf_printf(sbuf, "%02X ", output);
3780 		}
3781 		for (u8 offset = 50; offset < 52; offset++) {
3782 			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3783 			sbuf_printf(sbuf, "%02X ", output);
3784 		}
3785 	} else {
3786 		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
3787 		return (0);
3788 	}
3789 
3790 	sbuf_finish(sbuf);
3791 	sbuf_delete(sbuf);
3792 
3793 	return (0);
3794 }
3795 
3796 /*
3797  * Sysctl to read a byte from I2C bus.
3798  *
3799  * Input: 32-bit value:
3800  * 	bits 0-7:   device address (0xA0 or 0xA2)
3801  * 	bits 8-15:  offset (0-255)
3802  *	bits 16-31: unused
3803  * Output: 8-bit value read
3804  */
3805 static int
3806 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
3807 {
3808 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3809 	device_t dev = pf->dev;
3810 	int input = -1, error = 0;
3811 	u8 dev_addr, offset, output;
3812 
3813 	/* Read in I2C read parameters */
3814 	error = sysctl_handle_int(oidp, &input, 0, req);
3815 	if ((error) || (req->newptr == NULL))
3816 		return (error);
3817 	/* Validate device address */
3818 	dev_addr = input & 0xFF;
3819 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
3820 		return (EINVAL);
3821 	}
3822 	offset = (input >> 8) & 0xFF;
3823 
3824 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
3825 	if (error)
3826 		return (error);
3827 
3828 	device_printf(dev, "%02X\n", output);
3829 	return (0);
3830 }
3831 
3832 /*
3833  * Sysctl to write a byte to the I2C bus.
3834  *
3835  * Input: 32-bit value:
3836  * 	bits 0-7:   device address (0xA0 or 0xA2)
3837  * 	bits 8-15:  offset (0-255)
3838  *	bits 16-23: value to write
3839  *	bits 24-31: unused
3840  * Output: 8-bit value written
3841  */
3842 static int
3843 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
3844 {
3845 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3846 	device_t dev = pf->dev;
3847 	int input = -1, error = 0;
3848 	u8 dev_addr, offset, value;
3849 
3850 	/* Read in I2C write parameters */
3851 	error = sysctl_handle_int(oidp, &input, 0, req);
3852 	if ((error) || (req->newptr == NULL))
3853 		return (error);
3854 	/* Validate device address */
3855 	dev_addr = input & 0xFF;
3856 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
3857 		return (EINVAL);
3858 	}
3859 	offset = (input >> 8) & 0xFF;
3860 	value = (input >> 16) & 0xFF;
3861 
3862 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
3863 	if (error)
3864 		return (error);
3865 
3866 	device_printf(dev, "%02X written\n", value);
3867 	return (0);
3868 }
3869 
/*
 * Query the PHY abilities and report, via *is_set, whether the FEC
 * configuration bit 'bit_pos' is set in fec_cfg_curr_mod_ext_info.
 * The fetched abilities are left in *abilities for the caller's reuse
 * (e.g. by ixl_set_fec_config()).
 *
 * Returns 0 on success, or EIO if the adapter is in recovery mode or
 * the admin queue command fails.
 */
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* PHY queries are unavailable while in recovery mode. */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return (EIO);

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	/* Normalize the masked bit to 0/1. */
	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
	return (0);
}
3894 
3895 static int
3896 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
3897     u8 bit_pos, int set)
3898 {
3899 	device_t dev = pf->dev;
3900 	struct i40e_hw *hw = &pf->hw;
3901 	struct i40e_aq_set_phy_config config;
3902 	enum i40e_status_code status;
3903 
3904 	/* Set new PHY config */
3905 	memset(&config, 0, sizeof(config));
3906 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
3907 	if (set)
3908 		config.fec_config |= bit_pos;
3909 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
3910 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3911 		config.phy_type = abilities->phy_type;
3912 		config.phy_type_ext = abilities->phy_type_ext;
3913 		config.link_speed = abilities->link_speed;
3914 		config.eee_capability = abilities->eee_capability;
3915 		config.eeer = abilities->eeer_val;
3916 		config.low_power_ctrl = abilities->d3_lpan;
3917 		status = i40e_aq_set_phy_config(hw, &config, NULL);
3918 
3919 		if (status) {
3920 			device_printf(dev,
3921 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
3922 			    __func__, i40e_stat_str(hw, status),
3923 			    i40e_aq_str(hw, hw->aq.asq_last_status));
3924 			return (EIO);
3925 		}
3926 	}
3927 
3928 	return (0);
3929 }
3930 
3931 static int
3932 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
3933 {
3934 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3935 	int mode, error = 0;
3936 
3937 	struct i40e_aq_get_phy_abilities_resp abilities;
3938 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
3939 	if (error)
3940 		return (error);
3941 	/* Read in new mode */
3942 	error = sysctl_handle_int(oidp, &mode, 0, req);
3943 	if ((error) || (req->newptr == NULL))
3944 		return (error);
3945 
3946 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
3947 }
3948 
3949 static int
3950 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
3951 {
3952 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3953 	int mode, error = 0;
3954 
3955 	struct i40e_aq_get_phy_abilities_resp abilities;
3956 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
3957 	if (error)
3958 		return (error);
3959 	/* Read in new mode */
3960 	error = sysctl_handle_int(oidp, &mode, 0, req);
3961 	if ((error) || (req->newptr == NULL))
3962 		return (error);
3963 
3964 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
3965 }
3966 
3967 static int
3968 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
3969 {
3970 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3971 	int mode, error = 0;
3972 
3973 	struct i40e_aq_get_phy_abilities_resp abilities;
3974 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
3975 	if (error)
3976 		return (error);
3977 	/* Read in new mode */
3978 	error = sysctl_handle_int(oidp, &mode, 0, req);
3979 	if ((error) || (req->newptr == NULL))
3980 		return (error);
3981 
3982 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
3983 }
3984 
3985 static int
3986 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
3987 {
3988 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3989 	int mode, error = 0;
3990 
3991 	struct i40e_aq_get_phy_abilities_resp abilities;
3992 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
3993 	if (error)
3994 		return (error);
3995 	/* Read in new mode */
3996 	error = sysctl_handle_int(oidp, &mode, 0, req);
3997 	if ((error) || (req->newptr == NULL))
3998 		return (error);
3999 
4000 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4001 }
4002 
4003 static int
4004 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4005 {
4006 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4007 	int mode, error = 0;
4008 
4009 	struct i40e_aq_get_phy_abilities_resp abilities;
4010 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4011 	if (error)
4012 		return (error);
4013 	/* Read in new mode */
4014 	error = sysctl_handle_int(oidp, &mode, 0, req);
4015 	if ((error) || (req->newptr == NULL))
4016 		return (error);
4017 
4018 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4019 }
4020 
4021 static int
4022 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4023 {
4024 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4025 	struct i40e_hw *hw = &pf->hw;
4026 	device_t dev = pf->dev;
4027 	struct sbuf *buf;
4028 	int error = 0;
4029 	enum i40e_status_code status;
4030 
4031 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4032 	if (!buf) {
4033 		device_printf(dev, "Could not allocate sbuf for output.\n");
4034 		return (ENOMEM);
4035 	}
4036 
4037 	u8 *final_buff;
4038 	/* This amount is only necessary if reading the entire cluster into memory */
4039 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4040 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
4041 	if (final_buff == NULL) {
4042 		device_printf(dev, "Could not allocate memory for output.\n");
4043 		goto out;
4044 	}
4045 	int final_buff_len = 0;
4046 
4047 	u8 cluster_id = 1;
4048 	bool more = true;
4049 
4050 	u8 dump_buf[4096];
4051 	u16 curr_buff_size = 4096;
4052 	u8 curr_next_table = 0;
4053 	u32 curr_next_index = 0;
4054 
4055 	u16 ret_buff_size;
4056 	u8 ret_next_table;
4057 	u32 ret_next_index;
4058 
4059 	sbuf_cat(buf, "\n");
4060 
4061 	while (more) {
4062 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4063 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4064 		if (status) {
4065 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4066 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4067 			goto free_out;
4068 		}
4069 
4070 		/* copy info out of temp buffer */
4071 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4072 		final_buff_len += ret_buff_size;
4073 
4074 		if (ret_next_table != curr_next_table) {
4075 			/* We're done with the current table; we can dump out read data. */
4076 			sbuf_printf(buf, "%d:", curr_next_table);
4077 			int bytes_printed = 0;
4078 			while (bytes_printed <= final_buff_len) {
4079 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4080 				bytes_printed += 16;
4081 			}
4082 				sbuf_cat(buf, "\n");
4083 
4084 			/* The entire cluster has been read; we're finished */
4085 			if (ret_next_table == 0xFF)
4086 				break;
4087 
4088 			/* Otherwise clear the output buffer and continue reading */
4089 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4090 			final_buff_len = 0;
4091 		}
4092 
4093 		if (ret_next_index == 0xFFFFFFFF)
4094 			ret_next_index = 0;
4095 
4096 		bzero(dump_buf, sizeof(dump_buf));
4097 		curr_next_table = ret_next_table;
4098 		curr_next_index = ret_next_index;
4099 	}
4100 
4101 free_out:
4102 	free(final_buff, M_DEVBUF);
4103 out:
4104 	error = sbuf_finish(buf);
4105 	if (error)
4106 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4107 	sbuf_delete(buf);
4108 
4109 	return (error);
4110 }
4111 
4112 static int
4113 ixl_start_fw_lldp(struct ixl_pf *pf)
4114 {
4115 	struct i40e_hw *hw = &pf->hw;
4116 	enum i40e_status_code status;
4117 
4118 	status = i40e_aq_start_lldp(hw, false, NULL);
4119 	if (status != I40E_SUCCESS) {
4120 		switch (hw->aq.asq_last_status) {
4121 		case I40E_AQ_RC_EEXIST:
4122 			device_printf(pf->dev,
4123 			    "FW LLDP agent is already running\n");
4124 			break;
4125 		case I40E_AQ_RC_EPERM:
4126 			device_printf(pf->dev,
4127 			    "Device configuration forbids SW from starting "
4128 			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4129 			    "attribute to \"Enabled\" to use this sysctl\n");
4130 			return (EINVAL);
4131 		default:
4132 			device_printf(pf->dev,
4133 			    "Starting FW LLDP agent failed: error: %s, %s\n",
4134 			    i40e_stat_str(hw, status),
4135 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4136 			return (EINVAL);
4137 		}
4138 	}
4139 
4140 	atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4141 	return (0);
4142 }
4143 
4144 static int
4145 ixl_stop_fw_lldp(struct ixl_pf *pf)
4146 {
4147 	struct i40e_hw *hw = &pf->hw;
4148 	device_t dev = pf->dev;
4149 	enum i40e_status_code status;
4150 
4151 	if (hw->func_caps.npar_enable != 0) {
4152 		device_printf(dev,
4153 		    "Disabling FW LLDP agent is not supported on this device\n");
4154 		return (EINVAL);
4155 	}
4156 
4157 	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4158 		device_printf(dev,
4159 		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4160 		return (EINVAL);
4161 	}
4162 
4163 	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4164 	if (status != I40E_SUCCESS) {
4165 		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4166 			device_printf(dev,
4167 			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4168 			    i40e_stat_str(hw, status),
4169 			    i40e_aq_str(hw, hw->aq.asq_last_status));
4170 			return (EINVAL);
4171 		}
4172 
4173 		device_printf(dev, "FW LLDP agent is already stopped\n");
4174 	}
4175 
4176 #ifndef EXTERNAL_RELEASE
4177 	/* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */
4178 #endif
4179 	i40e_aq_set_dcb_parameters(hw, true, NULL);
4180 	atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4181 	return (0);
4182 }
4183 
4184 static int
4185 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4186 {
4187 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4188 	int state, new_state, error = 0;
4189 
4190 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4191 
4192 	/* Read in new mode */
4193 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4194 	if ((error) || (req->newptr == NULL))
4195 		return (error);
4196 
4197 	/* Already in requested state */
4198 	if (new_state == state)
4199 		return (error);
4200 
4201 	if (new_state == 0)
4202 		return ixl_stop_fw_lldp(pf);
4203 
4204 	return ixl_start_fw_lldp(pf);
4205 }
4206 
4207 static int
4208 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4209 {
4210 	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4211 	int                   state, new_state;
4212 	int                   sysctl_handle_status = 0;
4213 	enum i40e_status_code cmd_status;
4214 
4215 	/* Init states' values */
4216 	state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4217 
4218 	/* Get requested mode */
4219 	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4220 	if ((sysctl_handle_status) || (req->newptr == NULL))
4221 		return (sysctl_handle_status);
4222 
4223 	/* Check if state has changed */
4224 	if (new_state == state)
4225 		return (0);
4226 
4227 	/* Set new state */
4228 	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4229 
4230 	/* Save new state or report error */
4231 	if (!cmd_status) {
4232 		if (new_state == 0)
4233 			atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4234 		else
4235 			atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4236 	} else if (cmd_status == I40E_ERR_CONFIG)
4237 		return (EPERM);
4238 	else
4239 		return (EIO);
4240 
4241 	return (0);
4242 }
4243 
4244 int
4245 ixl_attach_get_link_status(struct ixl_pf *pf)
4246 {
4247 	struct i40e_hw *hw = &pf->hw;
4248 	device_t dev = pf->dev;
4249 	int error = 0;
4250 
4251 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4252 	    (hw->aq.fw_maj_ver < 4)) {
4253 		i40e_msec_delay(75);
4254 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4255 		if (error) {
4256 			device_printf(dev, "link restart failed, aq_err=%d\n",
4257 			    pf->hw.aq.asq_last_status);
4258 			return error;
4259 		}
4260 	}
4261 
4262 	/* Determine link state */
4263 	hw->phy.get_link_info = TRUE;
4264 	i40e_get_link_status(hw, &pf->link_up);
4265 	return (0);
4266 }
4267 
4268 static int
4269 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4270 {
4271 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4272 	int requested = 0, error = 0;
4273 
4274 	/* Read in new mode */
4275 	error = sysctl_handle_int(oidp, &requested, 0, req);
4276 	if ((error) || (req->newptr == NULL))
4277 		return (error);
4278 
4279 	/* Initiate the PF reset later in the admin task */
4280 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4281 
4282 	return (error);
4283 }
4284 
4285 static int
4286 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4287 {
4288 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4289 	struct i40e_hw *hw = &pf->hw;
4290 	int requested = 0, error = 0;
4291 
4292 	/* Read in new mode */
4293 	error = sysctl_handle_int(oidp, &requested, 0, req);
4294 	if ((error) || (req->newptr == NULL))
4295 		return (error);
4296 
4297 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4298 
4299 	return (error);
4300 }
4301 
4302 static int
4303 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4304 {
4305 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4306 	struct i40e_hw *hw = &pf->hw;
4307 	int requested = 0, error = 0;
4308 
4309 	/* Read in new mode */
4310 	error = sysctl_handle_int(oidp, &requested, 0, req);
4311 	if ((error) || (req->newptr == NULL))
4312 		return (error);
4313 
4314 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4315 
4316 	return (error);
4317 }
4318 
4319 /*
4320  * Print out mapping of TX queue indexes and Rx queue indexes
4321  * to MSI-X vectors.
4322  */
4323 static int
4324 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4325 {
4326 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4327 	struct ixl_vsi *vsi = &pf->vsi;
4328 	device_t dev = pf->dev;
4329 	struct sbuf *buf;
4330 	int error = 0;
4331 
4332 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
4333 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
4334 
4335 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4336 	if (!buf) {
4337 		device_printf(dev, "Could not allocate sbuf for output.\n");
4338 		return (ENOMEM);
4339 	}
4340 
4341 	sbuf_cat(buf, "\n");
4342 	for (int i = 0; i < vsi->num_rx_queues; i++) {
4343 		rx_que = &vsi->rx_queues[i];
4344 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4345 	}
4346 	for (int i = 0; i < vsi->num_tx_queues; i++) {
4347 		tx_que = &vsi->tx_queues[i];
4348 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4349 	}
4350 
4351 	error = sbuf_finish(buf);
4352 	if (error)
4353 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4354 	sbuf_delete(buf);
4355 
4356 	return (error);
4357 }
4358