/******************************************************************************

  Copyright (c) 2013-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl_pf.h"

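/*
 * Program the configured TX interrupt throttling (ITR) value into each TX
 * queue's ITR register and cache it in the ring state;
 * ixl_configure_rx_itr() below does the same for the RX side.
 */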
void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

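/*
 * Legacy/MSI (single vector) interrupt handler. All causes share one
 * vector; admin queue events are deferred to iflib, and queue pair 0
 * work is signaled by returning FILTER_SCHEDULE_THREAD.
 */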
int
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;
	u32			icr0;

	++que->irqs;

	/* Clear PBA at start of ISR if using legacy interrupts */
	if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		iflib_iov_intr_deferred(vsi->ctx);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		iflib_admin_intr_deferred(vsi->ctx);

	ixl_enable_intr0(hw);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*********************************************************************
 *
 *  MSI-X VSI Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	ixl_set_queue_rx_itr(rx_que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * Mask off interrupt causes that must be handled before they can be
	 * re-enabled.
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		const char *reset_type;
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			reset_type = "CORER";
			break;
		case I40E_RESET_GLOBR:
			reset_type = "GLOBR";
			break;
		case I40E_RESET_EMPR:
			reset_type = "EMPR";
			break;
		default:
			reset_type = "POR";
			break;
		}
		device_printf(dev, "Reset Requested! (%s)\n", reset_type);
		/* overload admin queue task to check reset progress */
		atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* If any of the critical errors above occurred, request a PF reset */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		atomic_set_32(&pf->state,
		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
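/*
 * For example, with two queue pairs the chains built below are:
 *   vector 1: LNKLSTN(0) -> RQCTL(0) -> TQCTL(0) -> EOL
 *   vector 2: LNKLSTN(1) -> RQCTL(1) -> TQCTL(1) -> EOL
 * Vector 0 is left for the admin queue and other non-queue causes.
 */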
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}

/*
 * Configure for single interrupt vector operation
 */
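/*
 * With a single vector, queue pair 0 shares the vector with the admin
 * queue and the other "misc" causes; ixl_intr() above services them all.
 */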
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/*
	**  Release all MSI-X VSI resources:
	*/
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(pf->pci_mem), pf->pci_mem);
}

/*********************************************************************
 *
 *  Set up the networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		goto only_auto;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));

		ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
	}

only_auto:
	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}

/*********************************************************************
 *
 *  Initialize the VSI: set up the VSI context (number of descriptors,
 *  buffer sizes, etc.) and initialize the TX and RX rings.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = vsi->back;
	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int 			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = fls(pf->qtag.num_allocated) - 1;
	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring		*txr = &tx_que->txr;
		struct i40e_hmc_obj_txq tctx;
		u32			txctl;

		/* Setup the HMC TX Context  */
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = scctx->isc_ntxd[0];
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to be pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->tx_paddr +
			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
		} else {
			tctx.head_wb_ena = 0;
			tctx.head_wb_addr = 0;
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(vsi, tx_que);
	}
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		struct rx_ring		*rxr = &rx_que->rxr;
		struct i40e_hmc_obj_rxq rctx;

		/* Next setup the HMC RX Context  */
		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
		    scctx->isc_max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = scctx->isc_nrxd[0];
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
	}
	return (err);
}

/*
** Update the RX queue's interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

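		/*
		 * Blend the newly chosen ITR with the current one using a
		 * weighted harmonic mean (9 parts current value, 1 part new
		 * target), so the moderation interval moves gradually
		 * between latency ranges instead of jumping.
		 */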
		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    rxr->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}

/*
** Update the TX queue's interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

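		/* Same weighted smoothing as in ixl_set_queue_rx_itr() above. */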
		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    txr->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

#ifdef IXL_DEBUG
/**
 * ixl_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_tx_queue *tx_que;
	int error;
	u32 val;

	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
	if (!tx_que)
		return (0);

	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixl_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL value from hardware
 * for a sysctl.
 */
int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_rx_queue *rx_que;
	int error;
	u32 val;

	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
	if (!rx_que)
		return (0);

	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
#endif

void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &pf->admin_irq,
			"Admin Queue IRQs received");

	sysctl_ctx_init(&vsi->sysctl_ctx);
	ixl_vsi_add_sysctls(vsi, "pf", true);

	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

void
ixl_set_rss_hlut(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	int		i, que_id;
	int		lut_entry_width;
	u32		lut = 0;
	enum i40e_status_code status;

	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;

	/* Populate the LUT, mapping RX queues to entries in round-robin order */
	u8 hlut_buf[512];
	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_rx_queues;
#else
		que_id = i % vsi->num_rx_queues;
#endif
		lut = (que_id & ((0x1 << lut_entry_width) - 1));
		hlut_buf[i] = lut;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
		if (status)
			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
		ixl_flush(hw);
	}
}

/*
** This routine updates the VLAN filters. It is called by init
** after a soft reset; it scans the filter table and then
** updates the hardware.
*/
void
ixl_setup_vlan_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter	*f;
	int			cnt = 0, flags;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter list for vlan entries,
	** mark them for addition and then call
	** for the AQ update.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags & IXL_FILTER_VLAN) {
			f->flags |=
			    (IXL_FILTER_ADD |
			    IXL_FILTER_USED);
			cnt++;
		}
	}
	if (cnt == 0) {
		printf("setup vlan: no filters found!\n");
		return;
	}
	flags = IXL_FILTER_VLAN;
	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	ixl_add_hw_filters(vsi, flags, cnt);
}

/* For PF VSI only */
int
ixl_enable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	int		error = 0;

	for (int i = 0; i < vsi->num_tx_queues; i++)
		error = ixl_enable_tx_ring(pf, &pf->qtag, i);

	for (int i = 0; i < vsi->num_rx_queues; i++)
		error = ixl_enable_rx_ring(pf, &pf->qtag, i);

	return (error);
}

int
ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
{
	int error = 0;

	for (int i = 0; i < vsi->num_tx_queues; i++)
		error = ixl_disable_tx_ring(pf, qtag, i);

	for (int i = 0; i < vsi->num_rx_queues; i++)
		error = ixl_disable_rx_ring(pf, qtag, i);

	return (error);
}

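/*
 * Enable the per-queue interrupts when running with MSI-X, or the single
 * shared vector otherwise.
 */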
void
ixl_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
			ixl_enable_queue(hw, que->rxr.me);
	} else
		ixl_enable_intr0(hw);
}

void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		ixl_disable_queue(hw, que->rxr.me);
}

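/*
 * Quiesce the device ahead of an adapter-wide reset: stop the interface if
 * it was running, tear down the HMC, mask the "other" interrupt, shut down
 * the admin queue, and release the PF's queue allocation.
 */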
int
ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (is_up)
		ixl_if_stop(pf->vsi.ctx);

	ixl_shutdown_hmc(pf);

	ixl_disable_intr0(hw);

	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
	return (error);
}

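/*
 * Rebuild driver state after a reset: bring the admin queue back up,
 * re-read device capabilities, recreate the HMC, re-reserve the PF's
 * queues, rebuild the switch configuration, restore flow control, drop the
 * default filters the firmware reinstalls, re-enable the admin interrupt,
 * and finally ask iflib to reinitialize the interface if it was up.
 */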
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	int error = 0;

	device_printf(dev, "Rebuilding driver state...\n");

	/* Setup */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		return (0);
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = ixl_setup_hmc(pf);
	if (error)
		goto ixl_rebuild_hw_structs_after_reset_err;

	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
		     error);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	u8 set_fc_err_mask;
	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Remove default filters reinstalled by FW on reset */
	ixl_del_default_hw_filters(vsi);

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
	}

	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	if (is_up) {
		iflib_request_reset(vsi->ctx);
		iflib_admin_intr_deferred(vsi->ctx);
	}

	device_printf(dev, "Rebuilding driver state done.\n");
	return (0);

ixl_rebuild_hw_structs_after_reset_err:
	device_printf(dev, "Reload the driver to recover\n");
	return (error);
}

/*
** Set flow control using sysctl:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
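/*
** Example (assuming the default sysctl naming for unit 0):
**	sysctl dev.ixl.0.fc=3
*/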
int
ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_fc, error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	/* Get request */
	requested_fc = pf->fc;
	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_fc < 0 || requested_fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = requested_fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new fc mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EIO);
	}
	pf->fc = requested_fc;

	return (0);
}