xref: /freebsd/sys/dev/ixl/ixl_pf_iflib.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
1b4a7ce06SEric Joyner /******************************************************************************
2b4a7ce06SEric Joyner 
3b4a7ce06SEric Joyner   Copyright (c) 2013-2020, Intel Corporation
4b4a7ce06SEric Joyner   All rights reserved.
5b4a7ce06SEric Joyner 
6b4a7ce06SEric Joyner   Redistribution and use in source and binary forms, with or without
7b4a7ce06SEric Joyner   modification, are permitted provided that the following conditions are met:
8b4a7ce06SEric Joyner 
9b4a7ce06SEric Joyner    1. Redistributions of source code must retain the above copyright notice,
10b4a7ce06SEric Joyner       this list of conditions and the following disclaimer.
11b4a7ce06SEric Joyner 
12b4a7ce06SEric Joyner    2. Redistributions in binary form must reproduce the above copyright
13b4a7ce06SEric Joyner       notice, this list of conditions and the following disclaimer in the
14b4a7ce06SEric Joyner       documentation and/or other materials provided with the distribution.
15b4a7ce06SEric Joyner 
16b4a7ce06SEric Joyner    3. Neither the name of the Intel Corporation nor the names of its
17b4a7ce06SEric Joyner       contributors may be used to endorse or promote products derived from
18b4a7ce06SEric Joyner       this software without specific prior written permission.
19b4a7ce06SEric Joyner 
20b4a7ce06SEric Joyner   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21b4a7ce06SEric Joyner   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22b4a7ce06SEric Joyner   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23b4a7ce06SEric Joyner   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24b4a7ce06SEric Joyner   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25b4a7ce06SEric Joyner   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26b4a7ce06SEric Joyner   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27b4a7ce06SEric Joyner   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28b4a7ce06SEric Joyner   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29b4a7ce06SEric Joyner   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30b4a7ce06SEric Joyner   POSSIBILITY OF SUCH DAMAGE.
31b4a7ce06SEric Joyner 
32b4a7ce06SEric Joyner ******************************************************************************/
33b4a7ce06SEric Joyner 
34b4a7ce06SEric Joyner #include "ixl_pf.h"
35b4a7ce06SEric Joyner 
36b4a7ce06SEric Joyner void
ixl_configure_tx_itr(struct ixl_pf * pf)37b4a7ce06SEric Joyner ixl_configure_tx_itr(struct ixl_pf *pf)
38b4a7ce06SEric Joyner {
39b4a7ce06SEric Joyner 	struct i40e_hw		*hw = &pf->hw;
40b4a7ce06SEric Joyner 	struct ixl_vsi		*vsi = &pf->vsi;
41b4a7ce06SEric Joyner 	struct ixl_tx_queue	*que = vsi->tx_queues;
42b4a7ce06SEric Joyner 
43b4a7ce06SEric Joyner 	vsi->tx_itr_setting = pf->tx_itr;
44b4a7ce06SEric Joyner 
45b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
46b4a7ce06SEric Joyner 		struct tx_ring	*txr = &que->txr;
47b4a7ce06SEric Joyner 
48b4a7ce06SEric Joyner 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
49b4a7ce06SEric Joyner 		    vsi->tx_itr_setting);
50b4a7ce06SEric Joyner 		txr->itr = vsi->tx_itr_setting;
51b4a7ce06SEric Joyner 		txr->latency = IXL_AVE_LATENCY;
52b4a7ce06SEric Joyner 	}
53b4a7ce06SEric Joyner }
54b4a7ce06SEric Joyner 
55b4a7ce06SEric Joyner void
ixl_configure_rx_itr(struct ixl_pf * pf)56b4a7ce06SEric Joyner ixl_configure_rx_itr(struct ixl_pf *pf)
57b4a7ce06SEric Joyner {
58b4a7ce06SEric Joyner 	struct i40e_hw		*hw = &pf->hw;
59b4a7ce06SEric Joyner 	struct ixl_vsi		*vsi = &pf->vsi;
60b4a7ce06SEric Joyner 	struct ixl_rx_queue	*que = vsi->rx_queues;
61b4a7ce06SEric Joyner 
62b4a7ce06SEric Joyner 	vsi->rx_itr_setting = pf->rx_itr;
63b4a7ce06SEric Joyner 
64b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
65b4a7ce06SEric Joyner 		struct rx_ring 	*rxr = &que->rxr;
66b4a7ce06SEric Joyner 
67b4a7ce06SEric Joyner 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
68b4a7ce06SEric Joyner 		    vsi->rx_itr_setting);
69b4a7ce06SEric Joyner 		rxr->itr = vsi->rx_itr_setting;
70b4a7ce06SEric Joyner 		rxr->latency = IXL_AVE_LATENCY;
71b4a7ce06SEric Joyner 	}
72b4a7ce06SEric Joyner }
73b4a7ce06SEric Joyner 
74b4a7ce06SEric Joyner int
ixl_intr(void * arg)75b4a7ce06SEric Joyner ixl_intr(void *arg)
76b4a7ce06SEric Joyner {
77b4a7ce06SEric Joyner 	struct ixl_pf		*pf = arg;
78b4a7ce06SEric Joyner 	struct i40e_hw		*hw =  &pf->hw;
79b4a7ce06SEric Joyner 	struct ixl_vsi		*vsi = &pf->vsi;
80b4a7ce06SEric Joyner 	struct ixl_rx_queue	*que = vsi->rx_queues;
81b4a7ce06SEric Joyner         u32			icr0;
82b4a7ce06SEric Joyner 
83b4a7ce06SEric Joyner 	++que->irqs;
84b4a7ce06SEric Joyner 
85b4a7ce06SEric Joyner 	/* Clear PBA at start of ISR if using legacy interrupts */
86b4a7ce06SEric Joyner 	if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
87b4a7ce06SEric Joyner 		wr32(hw, I40E_PFINT_DYN_CTL0,
88b4a7ce06SEric Joyner 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
89b4a7ce06SEric Joyner 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
90b4a7ce06SEric Joyner 
91b4a7ce06SEric Joyner 	icr0 = rd32(hw, I40E_PFINT_ICR0);
92b4a7ce06SEric Joyner 
93b4a7ce06SEric Joyner 
94b4a7ce06SEric Joyner #ifdef PCI_IOV
95b4a7ce06SEric Joyner 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
96b4a7ce06SEric Joyner 		iflib_iov_intr_deferred(vsi->ctx);
97b4a7ce06SEric Joyner #endif
98b4a7ce06SEric Joyner 
99b4a7ce06SEric Joyner 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
100b4a7ce06SEric Joyner 		iflib_admin_intr_deferred(vsi->ctx);
101b4a7ce06SEric Joyner 
102b4a7ce06SEric Joyner 	ixl_enable_intr0(hw);
103b4a7ce06SEric Joyner 
104b4a7ce06SEric Joyner 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
105b4a7ce06SEric Joyner 		return (FILTER_SCHEDULE_THREAD);
106b4a7ce06SEric Joyner 	else
107b4a7ce06SEric Joyner 		return (FILTER_HANDLED);
108b4a7ce06SEric Joyner }
109b4a7ce06SEric Joyner 
110b4a7ce06SEric Joyner /*********************************************************************
111b4a7ce06SEric Joyner  *
112b4a7ce06SEric Joyner  *  MSI-X VSI Interrupt Service routine
113b4a7ce06SEric Joyner  *
114b4a7ce06SEric Joyner  **********************************************************************/
115b4a7ce06SEric Joyner int
ixl_msix_que(void * arg)116b4a7ce06SEric Joyner ixl_msix_que(void *arg)
117b4a7ce06SEric Joyner {
118b4a7ce06SEric Joyner 	struct ixl_rx_queue *rx_que = arg;
119b4a7ce06SEric Joyner 
120b4a7ce06SEric Joyner 	++rx_que->irqs;
121b4a7ce06SEric Joyner 
122b4a7ce06SEric Joyner 	ixl_set_queue_rx_itr(rx_que);
123b4a7ce06SEric Joyner 
124b4a7ce06SEric Joyner 	return (FILTER_SCHEDULE_THREAD);
125b4a7ce06SEric Joyner }
126b4a7ce06SEric Joyner 
/*********************************************************************
 *
 *  MSI-X Admin Queue Interrupt Service routine
 *
 *  Filter routine for the "other causes" MSI-X vector.  Decodes the
 *  ICR0 cause register, records pending work in pf->state, and masks
 *  off (via ICR0_ENA) any cause that must be serviced by the admin
 *  task before it may fire again.  Returns FILTER_SCHEDULE_THREAD if
 *  the admin task has work to do, FILTER_HANDLED otherwise.
 *
 **********************************************************************/
int
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	DDPRINTF(dev, "begin");

	++pf->admin_irq;

	/* Reading ICR0 acknowledges and clears the latched causes. */
	reg = rd32(hw, I40E_PFINT_ICR0);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		/* Admin queue event: leave masked until the task drains it. */
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		/* Malicious driver detection; admin task handles MDD events. */
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		ixl_set_state(&pf->state, IXL_STATE_MDD_PENDING);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		/*
		 * A global reset was requested; decode its type from
		 * GLGEN_RSTAT for the log message.
		 */
		const char *reset_type;
		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			reset_type = "CORER";
			break;
		case I40E_RESET_GLOBR:
			reset_type = "GLOBR";
			break;
		case I40E_RESET_EMPR:
			reset_type = "EMPR";
			break;
		default:
			reset_type = "POR";
			break;
		}
		device_printf(dev, "Reset Requested! (%s)\n", reset_type);
		/* overload admin queue task to check reset progress */
		ixl_set_state(&pf->state, IXL_STATE_RESETTING);
		do_task = TRUE;
	}

	/*
	 * PE / PCI / ECC exceptions are all handled in the same way:
	 * mask out these three causes, then request a PF reset
	 */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
		device_printf(dev, "ECC Error detected!\n");
	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		device_printf(dev, "PCI Exception detected!\n");
	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
	/* Checks against the conditions above */
	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
		ixl_set_state(&pf->state,
		    IXL_STATE_PF_RESET_REQ | IXL_STATE_PF_CRIT_ERR);
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		/* Log HMC error details and clear the error-info register. */
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		/* VF level reset: defer to the iflib IOV task. */
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		iflib_iov_intr_deferred(pf->vsi.ctx);
	}
#endif

	/* Write back the (possibly reduced) enable mask, then re-arm. */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
	ixl_enable_intr0(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}
235b4a7ce06SEric Joyner 
/*
 * Configure queue interrupt cause registers in hardware.
 *
 * Each MSI-X vector i (vectors 1..n; vector 0 is the admin vector) is
 * given a hardware cause linked list covering queue pair i:
 *
 *   LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;	/* vector 0 is reserved for adminq */

	/* NOTE(review): assumes num_rx_queues == num_tx_queues in practice;
	 * the max() covers the general case. */
	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
		/* Make sure interrupt is disabled */
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* Set linked list head to point to corresponding RX queue
		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);

		/* RX cause: enable, RX ITR, this vector; next is TX queue i. */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: enable, TX ITR, this vector; list ends here. */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
275b4a7ce06SEric Joyner 
/*
 * Configure for single interrupt vector operation
 *
 * Used for legacy INTx / single-MSI mode: one vector services both the
 * "other" causes and queue pair 0.  The hardware cause linked list is
 * LNKLST0 -> RQCTL(0) -> TQCTL(0) -> EOL.
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32 reg;

	/* Only queue pair 0 exists in this mode; seed its ITR state. */
	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}
318b4a7ce06SEric Joyner 
319b4a7ce06SEric Joyner void
ixl_free_pci_resources(struct ixl_pf * pf)320b4a7ce06SEric Joyner ixl_free_pci_resources(struct ixl_pf *pf)
321b4a7ce06SEric Joyner {
322b4a7ce06SEric Joyner 	struct ixl_vsi		*vsi = &pf->vsi;
323b4a7ce06SEric Joyner 	device_t		dev = iflib_get_dev(vsi->ctx);
324b4a7ce06SEric Joyner 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
325b4a7ce06SEric Joyner 
326b4a7ce06SEric Joyner 	/* We may get here before stations are set up */
327b4a7ce06SEric Joyner 	if (rx_que == NULL)
328b4a7ce06SEric Joyner 		goto early;
329b4a7ce06SEric Joyner 
330b4a7ce06SEric Joyner 	/*
331b4a7ce06SEric Joyner 	**  Release all MSI-X VSI resources:
332b4a7ce06SEric Joyner 	*/
333b4a7ce06SEric Joyner 	iflib_irq_free(vsi->ctx, &vsi->irq);
334b4a7ce06SEric Joyner 
335b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
336b4a7ce06SEric Joyner 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
337b4a7ce06SEric Joyner early:
338b4a7ce06SEric Joyner 	if (pf->pci_mem != NULL)
339b4a7ce06SEric Joyner 		bus_release_resource(dev, SYS_RES_MEMORY,
340b4a7ce06SEric Joyner 		    rman_get_rid(pf->pci_mem), pf->pci_mem);
341b4a7ce06SEric Joyner }
342b4a7ce06SEric Joyner 
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Queries the firmware for supported PHY speeds and populates the
 *  ifmedia list accordingly; in recovery mode only autoselect media
 *  is offered.  Returns 0 (errors from the PHY query are logged, not
 *  propagated).
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Max frame includes L2 header, CRC, and a single VLAN tag. */
	vsi->shared->isc_max_frame_size =
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/* In recovery mode the AQ PHY query is skipped entirely. */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		goto only_auto;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		/* Advertise baudrate and media types for the supported speeds. */
		pf->supported_speeds = abilities.link_speed;
		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));

		ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
	}

only_auto:
	/* Use autoselect media by default */
	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
396b4a7ce06SEric Joyner 
/*
** Run when the Admin Queue gets a link state change interrupt.
**
** Refreshes hw->phy.link_info and pf->link_up from firmware; the
** event payload 'e' itself is deliberately unused (see comment below).
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_link_status *link_info = &hw->phy.link_info;

	/* Driver needs to re-enable delivering of link status events
	 * by FW after each event reception. Call i40e_get_link_status
	 * to do that. To not lose information about link state changes,
	 * which happened between receiving an event and the call,
	 * do not rely on status from event but use most recent
	 * status information retrieved by the call. */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((link_info->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (if_getflags(pf->vsi.ifp) & IFF_UP) &&
	    (!(link_info->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(link_info->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* OS link info is updated elsewhere */
}
427b4a7ce06SEric Joyner 
428b4a7ce06SEric Joyner /*********************************************************************
429b4a7ce06SEric Joyner  *
430b4a7ce06SEric Joyner  *  Initialize the VSI:  this handles contexts, which means things
431b4a7ce06SEric Joyner  *  			 like the number of descriptors, buffer size,
432b4a7ce06SEric Joyner  *			 plus we init the rings thru this function.
433b4a7ce06SEric Joyner  *
434b4a7ce06SEric Joyner  **********************************************************************/
435b4a7ce06SEric Joyner int
ixl_initialize_vsi(struct ixl_vsi * vsi)436b4a7ce06SEric Joyner ixl_initialize_vsi(struct ixl_vsi *vsi)
437b4a7ce06SEric Joyner {
438b4a7ce06SEric Joyner 	struct ixl_pf *pf = vsi->back;
439b4a7ce06SEric Joyner 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
440b4a7ce06SEric Joyner 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
441b4a7ce06SEric Joyner 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
442b4a7ce06SEric Joyner 	device_t		dev = iflib_get_dev(vsi->ctx);
443b4a7ce06SEric Joyner 	struct i40e_hw		*hw = vsi->hw;
444b4a7ce06SEric Joyner 	struct i40e_vsi_context	ctxt;
445b4a7ce06SEric Joyner 	int 			tc_queues;
446b4a7ce06SEric Joyner 	int			err = 0;
447b4a7ce06SEric Joyner 
448b4a7ce06SEric Joyner 	memset(&ctxt, 0, sizeof(ctxt));
449b4a7ce06SEric Joyner 	ctxt.seid = vsi->seid;
450b4a7ce06SEric Joyner 	if (pf->veb_seid != 0)
451b4a7ce06SEric Joyner 		ctxt.uplink_seid = pf->veb_seid;
452b4a7ce06SEric Joyner 	ctxt.pf_num = hw->pf_id;
453b4a7ce06SEric Joyner 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
454b4a7ce06SEric Joyner 	if (err) {
455b4a7ce06SEric Joyner 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
456b4a7ce06SEric Joyner 		    " aq_error %d\n", err, hw->aq.asq_last_status);
457b4a7ce06SEric Joyner 		return (err);
458b4a7ce06SEric Joyner 	}
459b4a7ce06SEric Joyner 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
460b4a7ce06SEric Joyner 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
461b4a7ce06SEric Joyner 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
462b4a7ce06SEric Joyner 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
463b4a7ce06SEric Joyner 	    ctxt.uplink_seid, ctxt.vsi_number,
464b4a7ce06SEric Joyner 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
465b4a7ce06SEric Joyner 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
466b4a7ce06SEric Joyner 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
467b4a7ce06SEric Joyner 	/*
468b4a7ce06SEric Joyner 	** Set the queue and traffic class bits
469b4a7ce06SEric Joyner 	**  - when multiple traffic classes are supported
470b4a7ce06SEric Joyner 	**    this will need to be more robust.
471b4a7ce06SEric Joyner 	*/
472b4a7ce06SEric Joyner 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
473b4a7ce06SEric Joyner 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
474b4a7ce06SEric Joyner 	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
475b4a7ce06SEric Joyner 	ctxt.info.queue_mapping[0] = 0;
476b4a7ce06SEric Joyner 	/*
477b4a7ce06SEric Joyner 	 * This VSI will only use traffic class 0; start traffic class 0's
478b4a7ce06SEric Joyner 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
479b4a7ce06SEric Joyner 	 * the driver may not use all of them).
480b4a7ce06SEric Joyner 	 */
481b4a7ce06SEric Joyner 	tc_queues = fls(pf->qtag.num_allocated) - 1;
482b4a7ce06SEric Joyner 	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
483b4a7ce06SEric Joyner 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
484b4a7ce06SEric Joyner 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
485b4a7ce06SEric Joyner 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
486b4a7ce06SEric Joyner 
487b4a7ce06SEric Joyner 	/* Set VLAN receive stripping mode */
488b4a7ce06SEric Joyner 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
489b4a7ce06SEric Joyner 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
490b4a7ce06SEric Joyner 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
491b4a7ce06SEric Joyner 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
492b4a7ce06SEric Joyner 	else
493b4a7ce06SEric Joyner 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
494b4a7ce06SEric Joyner 
495b4a7ce06SEric Joyner #ifdef IXL_IW
496b4a7ce06SEric Joyner 	/* Set TCP Enable for iWARP capable VSI */
497b4a7ce06SEric Joyner 	if (ixl_enable_iwarp && pf->iw_enabled) {
498b4a7ce06SEric Joyner 		ctxt.info.valid_sections |=
499b4a7ce06SEric Joyner 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
500b4a7ce06SEric Joyner 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
501b4a7ce06SEric Joyner 	}
502b4a7ce06SEric Joyner #endif
503b4a7ce06SEric Joyner 	/* Save VSI number and info for use later */
504b4a7ce06SEric Joyner 	vsi->vsi_num = ctxt.vsi_number;
505b4a7ce06SEric Joyner 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
506b4a7ce06SEric Joyner 
507b4a7ce06SEric Joyner 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
508b4a7ce06SEric Joyner 
509b4a7ce06SEric Joyner 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
510b4a7ce06SEric Joyner 	if (err) {
511b4a7ce06SEric Joyner 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
512b4a7ce06SEric Joyner 		    " aq_error %d\n", err, hw->aq.asq_last_status);
513b4a7ce06SEric Joyner 		return (err);
514b4a7ce06SEric Joyner 	}
515b4a7ce06SEric Joyner 
516b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
517b4a7ce06SEric Joyner 		struct tx_ring		*txr = &tx_que->txr;
518b4a7ce06SEric Joyner 		struct i40e_hmc_obj_txq tctx;
519b4a7ce06SEric Joyner 		u32			txctl;
520b4a7ce06SEric Joyner 
521b4a7ce06SEric Joyner 		/* Setup the HMC TX Context  */
522b4a7ce06SEric Joyner 		bzero(&tctx, sizeof(tctx));
523b4a7ce06SEric Joyner 		tctx.new_context = 1;
524b4a7ce06SEric Joyner 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
525b4a7ce06SEric Joyner 		tctx.qlen = scctx->isc_ntxd[0];
526b4a7ce06SEric Joyner 		tctx.fc_ena = 0;	/* Disable FCoE */
527b4a7ce06SEric Joyner 		/*
528b4a7ce06SEric Joyner 		 * This value needs to pulled from the VSI that this queue
529b4a7ce06SEric Joyner 		 * is assigned to. Index into array is traffic class.
530b4a7ce06SEric Joyner 		 */
531b4a7ce06SEric Joyner 		tctx.rdylist = vsi->info.qs_handle[0];
532b4a7ce06SEric Joyner 		/*
533b4a7ce06SEric Joyner 		 * Set these to enable Head Writeback
534b4a7ce06SEric Joyner 		 * - Address is last entry in TX ring (reserved for HWB index)
535b4a7ce06SEric Joyner 		 * Leave these as 0 for Descriptor Writeback
536b4a7ce06SEric Joyner 		 */
537b4a7ce06SEric Joyner 		if (vsi->enable_head_writeback) {
538b4a7ce06SEric Joyner 			tctx.head_wb_ena = 1;
539b4a7ce06SEric Joyner 			tctx.head_wb_addr = txr->tx_paddr +
540b4a7ce06SEric Joyner 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
541b4a7ce06SEric Joyner 		} else {
542b4a7ce06SEric Joyner 			tctx.head_wb_ena = 0;
543b4a7ce06SEric Joyner 			tctx.head_wb_addr = 0;
544b4a7ce06SEric Joyner 		}
545b4a7ce06SEric Joyner 		tctx.rdylist_act = 0;
546b4a7ce06SEric Joyner 		err = i40e_clear_lan_tx_queue_context(hw, i);
547b4a7ce06SEric Joyner 		if (err) {
548b4a7ce06SEric Joyner 			device_printf(dev, "Unable to clear TX context\n");
549b4a7ce06SEric Joyner 			break;
550b4a7ce06SEric Joyner 		}
551b4a7ce06SEric Joyner 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
552b4a7ce06SEric Joyner 		if (err) {
553b4a7ce06SEric Joyner 			device_printf(dev, "Unable to set TX context\n");
554b4a7ce06SEric Joyner 			break;
555b4a7ce06SEric Joyner 		}
556b4a7ce06SEric Joyner 		/* Associate the ring with this PF */
557b4a7ce06SEric Joyner 		txctl = I40E_QTX_CTL_PF_QUEUE;
558b4a7ce06SEric Joyner 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
559b4a7ce06SEric Joyner 		    I40E_QTX_CTL_PF_INDX_MASK);
560b4a7ce06SEric Joyner 		wr32(hw, I40E_QTX_CTL(i), txctl);
561b4a7ce06SEric Joyner 		ixl_flush(hw);
562b4a7ce06SEric Joyner 
563b4a7ce06SEric Joyner 		/* Do ring (re)init */
564b4a7ce06SEric Joyner 		ixl_init_tx_ring(vsi, tx_que);
565b4a7ce06SEric Joyner 	}
566b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
567b4a7ce06SEric Joyner 		struct rx_ring 		*rxr = &rx_que->rxr;
568b4a7ce06SEric Joyner 		struct i40e_hmc_obj_rxq rctx;
569b4a7ce06SEric Joyner 
570b4a7ce06SEric Joyner 		/* Next setup the HMC RX Context  */
571b4a7ce06SEric Joyner 		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
572b4a7ce06SEric Joyner 
573b4a7ce06SEric Joyner 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
574b4a7ce06SEric Joyner 
575b4a7ce06SEric Joyner 		/* Set up an RX context for the HMC */
576b4a7ce06SEric Joyner 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
577b4a7ce06SEric Joyner 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
578b4a7ce06SEric Joyner 		/* ignore header split for now */
579b4a7ce06SEric Joyner 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
580b4a7ce06SEric Joyner 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
581b4a7ce06SEric Joyner 		    scctx->isc_max_frame_size : max_rxmax;
582b4a7ce06SEric Joyner 		rctx.dtype = 0;
583b4a7ce06SEric Joyner 		rctx.dsize = 1;		/* do 32byte descriptors */
584b4a7ce06SEric Joyner 		rctx.hsplit_0 = 0;	/* no header split */
585b4a7ce06SEric Joyner 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
586b4a7ce06SEric Joyner 		rctx.qlen = scctx->isc_nrxd[0];
587b4a7ce06SEric Joyner 		rctx.tphrdesc_ena = 1;
588b4a7ce06SEric Joyner 		rctx.tphwdesc_ena = 1;
589b4a7ce06SEric Joyner 		rctx.tphdata_ena = 0;	/* Header Split related */
590b4a7ce06SEric Joyner 		rctx.tphhead_ena = 0;	/* Header Split related */
591b4a7ce06SEric Joyner 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
592b4a7ce06SEric Joyner 		rctx.crcstrip = 1;
593b4a7ce06SEric Joyner 		rctx.l2tsel = 1;
594b4a7ce06SEric Joyner 		rctx.showiv = 1;	/* Strip inner VLAN header */
595b4a7ce06SEric Joyner 		rctx.fc_ena = 0;	/* Disable FCoE */
596b4a7ce06SEric Joyner 		rctx.prefena = 1;	/* Prefetch descriptors */
597b4a7ce06SEric Joyner 
598b4a7ce06SEric Joyner 		err = i40e_clear_lan_rx_queue_context(hw, i);
599b4a7ce06SEric Joyner 		if (err) {
600b4a7ce06SEric Joyner 			device_printf(dev,
601b4a7ce06SEric Joyner 			    "Unable to clear RX context %d\n", i);
602b4a7ce06SEric Joyner 			break;
603b4a7ce06SEric Joyner 		}
604b4a7ce06SEric Joyner 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
605b4a7ce06SEric Joyner 		if (err) {
606b4a7ce06SEric Joyner 			device_printf(dev, "Unable to set RX context %d\n", i);
607b4a7ce06SEric Joyner 			break;
608b4a7ce06SEric Joyner 		}
609b4a7ce06SEric Joyner 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
610b4a7ce06SEric Joyner 	}
611b4a7ce06SEric Joyner 	return (err);
612b4a7ce06SEric Joyner }
613b4a7ce06SEric Joyner 
614b4a7ce06SEric Joyner 
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
619b4a7ce06SEric Joyner void
ixl_set_queue_rx_itr(struct ixl_rx_queue * que)620b4a7ce06SEric Joyner ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
621b4a7ce06SEric Joyner {
622b4a7ce06SEric Joyner 	struct ixl_vsi	*vsi = que->vsi;
623b4a7ce06SEric Joyner 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
624b4a7ce06SEric Joyner 	struct i40e_hw	*hw = vsi->hw;
625b4a7ce06SEric Joyner 	struct rx_ring	*rxr = &que->rxr;
626b4a7ce06SEric Joyner 	u16		rx_itr;
627b4a7ce06SEric Joyner 	u16		rx_latency = 0;
628b4a7ce06SEric Joyner 	int		rx_bytes;
629b4a7ce06SEric Joyner 
630b4a7ce06SEric Joyner 	/* Idle, do nothing */
631b4a7ce06SEric Joyner 	if (rxr->bytes == 0)
632b4a7ce06SEric Joyner 		return;
633b4a7ce06SEric Joyner 
634b4a7ce06SEric Joyner 	if (pf->dynamic_rx_itr) {
635b4a7ce06SEric Joyner 		rx_bytes = rxr->bytes/rxr->itr;
636b4a7ce06SEric Joyner 		rx_itr = rxr->itr;
637b4a7ce06SEric Joyner 
638b4a7ce06SEric Joyner 		/* Adjust latency range */
639b4a7ce06SEric Joyner 		switch (rxr->latency) {
640b4a7ce06SEric Joyner 		case IXL_LOW_LATENCY:
641b4a7ce06SEric Joyner 			if (rx_bytes > 10) {
642b4a7ce06SEric Joyner 				rx_latency = IXL_AVE_LATENCY;
643b4a7ce06SEric Joyner 				rx_itr = IXL_ITR_20K;
644b4a7ce06SEric Joyner 			}
645b4a7ce06SEric Joyner 			break;
646b4a7ce06SEric Joyner 		case IXL_AVE_LATENCY:
647b4a7ce06SEric Joyner 			if (rx_bytes > 20) {
648b4a7ce06SEric Joyner 				rx_latency = IXL_BULK_LATENCY;
649b4a7ce06SEric Joyner 				rx_itr = IXL_ITR_8K;
650b4a7ce06SEric Joyner 			} else if (rx_bytes <= 10) {
651b4a7ce06SEric Joyner 				rx_latency = IXL_LOW_LATENCY;
652b4a7ce06SEric Joyner 				rx_itr = IXL_ITR_100K;
653b4a7ce06SEric Joyner 			}
654b4a7ce06SEric Joyner 			break;
655b4a7ce06SEric Joyner 		case IXL_BULK_LATENCY:
656b4a7ce06SEric Joyner 			if (rx_bytes <= 20) {
657b4a7ce06SEric Joyner 				rx_latency = IXL_AVE_LATENCY;
658b4a7ce06SEric Joyner 				rx_itr = IXL_ITR_20K;
659b4a7ce06SEric Joyner 			}
660b4a7ce06SEric Joyner 			break;
661b4a7ce06SEric Joyner 		}
662b4a7ce06SEric Joyner 
663b4a7ce06SEric Joyner 		rxr->latency = rx_latency;
664b4a7ce06SEric Joyner 
665b4a7ce06SEric Joyner 		if (rx_itr != rxr->itr) {
666b4a7ce06SEric Joyner 			/* do an exponential smoothing */
667b4a7ce06SEric Joyner 			rx_itr = (10 * rx_itr * rxr->itr) /
668b4a7ce06SEric Joyner 			    ((9 * rx_itr) + rxr->itr);
669b4a7ce06SEric Joyner 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
670b4a7ce06SEric Joyner 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
671b4a7ce06SEric Joyner 			    rxr->me), rxr->itr);
672b4a7ce06SEric Joyner 		}
673b4a7ce06SEric Joyner 	} else { /* We may have have toggled to non-dynamic */
674b4a7ce06SEric Joyner 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
675b4a7ce06SEric Joyner 			vsi->rx_itr_setting = pf->rx_itr;
676b4a7ce06SEric Joyner 		/* Update the hardware if needed */
677b4a7ce06SEric Joyner 		if (rxr->itr != vsi->rx_itr_setting) {
678b4a7ce06SEric Joyner 			rxr->itr = vsi->rx_itr_setting;
679b4a7ce06SEric Joyner 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
680b4a7ce06SEric Joyner 			    rxr->me), rxr->itr);
681b4a7ce06SEric Joyner 		}
682b4a7ce06SEric Joyner 	}
683b4a7ce06SEric Joyner 	rxr->bytes = 0;
684b4a7ce06SEric Joyner 	rxr->packets = 0;
685b4a7ce06SEric Joyner }
686b4a7ce06SEric Joyner 
687b4a7ce06SEric Joyner 
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
692b4a7ce06SEric Joyner void
ixl_set_queue_tx_itr(struct ixl_tx_queue * que)693b4a7ce06SEric Joyner ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
694b4a7ce06SEric Joyner {
695b4a7ce06SEric Joyner 	struct ixl_vsi	*vsi = que->vsi;
696b4a7ce06SEric Joyner 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
697b4a7ce06SEric Joyner 	struct i40e_hw	*hw = vsi->hw;
698b4a7ce06SEric Joyner 	struct tx_ring	*txr = &que->txr;
699b4a7ce06SEric Joyner 	u16		tx_itr;
700b4a7ce06SEric Joyner 	u16		tx_latency = 0;
701b4a7ce06SEric Joyner 	int		tx_bytes;
702b4a7ce06SEric Joyner 
703b4a7ce06SEric Joyner 
704b4a7ce06SEric Joyner 	/* Idle, do nothing */
705b4a7ce06SEric Joyner 	if (txr->bytes == 0)
706b4a7ce06SEric Joyner 		return;
707b4a7ce06SEric Joyner 
708b4a7ce06SEric Joyner 	if (pf->dynamic_tx_itr) {
709b4a7ce06SEric Joyner 		tx_bytes = txr->bytes/txr->itr;
710b4a7ce06SEric Joyner 		tx_itr = txr->itr;
711b4a7ce06SEric Joyner 
712b4a7ce06SEric Joyner 		switch (txr->latency) {
713b4a7ce06SEric Joyner 		case IXL_LOW_LATENCY:
714b4a7ce06SEric Joyner 			if (tx_bytes > 10) {
715b4a7ce06SEric Joyner 				tx_latency = IXL_AVE_LATENCY;
716b4a7ce06SEric Joyner 				tx_itr = IXL_ITR_20K;
717b4a7ce06SEric Joyner 			}
718b4a7ce06SEric Joyner 			break;
719b4a7ce06SEric Joyner 		case IXL_AVE_LATENCY:
720b4a7ce06SEric Joyner 			if (tx_bytes > 20) {
721b4a7ce06SEric Joyner 				tx_latency = IXL_BULK_LATENCY;
722b4a7ce06SEric Joyner 				tx_itr = IXL_ITR_8K;
723b4a7ce06SEric Joyner 			} else if (tx_bytes <= 10) {
724b4a7ce06SEric Joyner 				tx_latency = IXL_LOW_LATENCY;
725b4a7ce06SEric Joyner 				tx_itr = IXL_ITR_100K;
726b4a7ce06SEric Joyner 			}
727b4a7ce06SEric Joyner 			break;
728b4a7ce06SEric Joyner 		case IXL_BULK_LATENCY:
729b4a7ce06SEric Joyner 			if (tx_bytes <= 20) {
730b4a7ce06SEric Joyner 				tx_latency = IXL_AVE_LATENCY;
731b4a7ce06SEric Joyner 				tx_itr = IXL_ITR_20K;
732b4a7ce06SEric Joyner 			}
733b4a7ce06SEric Joyner 			break;
734b4a7ce06SEric Joyner 		}
735b4a7ce06SEric Joyner 
736b4a7ce06SEric Joyner 		txr->latency = tx_latency;
737b4a7ce06SEric Joyner 
738b4a7ce06SEric Joyner 		if (tx_itr != txr->itr) {
739b4a7ce06SEric Joyner 			/* do an exponential smoothing */
740b4a7ce06SEric Joyner 			tx_itr = (10 * tx_itr * txr->itr) /
741b4a7ce06SEric Joyner 			    ((9 * tx_itr) + txr->itr);
742b4a7ce06SEric Joyner 			txr->itr = min(tx_itr, IXL_MAX_ITR);
743b4a7ce06SEric Joyner 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
744b4a7ce06SEric Joyner 			    txr->me), txr->itr);
745b4a7ce06SEric Joyner 		}
746b4a7ce06SEric Joyner 
747b4a7ce06SEric Joyner 	} else { /* We may have have toggled to non-dynamic */
748b4a7ce06SEric Joyner 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
749b4a7ce06SEric Joyner 			vsi->tx_itr_setting = pf->tx_itr;
750b4a7ce06SEric Joyner 		/* Update the hardware if needed */
751b4a7ce06SEric Joyner 		if (txr->itr != vsi->tx_itr_setting) {
752b4a7ce06SEric Joyner 			txr->itr = vsi->tx_itr_setting;
753b4a7ce06SEric Joyner 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
754b4a7ce06SEric Joyner 			    txr->me), txr->itr);
755b4a7ce06SEric Joyner 		}
756b4a7ce06SEric Joyner 	}
757b4a7ce06SEric Joyner 	txr->bytes = 0;
758b4a7ce06SEric Joyner 	txr->packets = 0;
759b4a7ce06SEric Joyner 	return;
760b4a7ce06SEric Joyner }
761b4a7ce06SEric Joyner 
762b4a7ce06SEric Joyner #ifdef IXL_DEBUG
763b4a7ce06SEric Joyner /**
764b4a7ce06SEric Joyner  * ixl_sysctl_qtx_tail_handler
765b4a7ce06SEric Joyner  * Retrieves I40E_QTX_TAIL value from hardware
766b4a7ce06SEric Joyner  * for a sysctl.
767b4a7ce06SEric Joyner  */
768b4a7ce06SEric Joyner int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)769b4a7ce06SEric Joyner ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
770b4a7ce06SEric Joyner {
771b4a7ce06SEric Joyner 	struct ixl_tx_queue *tx_que;
772b4a7ce06SEric Joyner 	int error;
773b4a7ce06SEric Joyner 	u32 val;
774b4a7ce06SEric Joyner 
775b4a7ce06SEric Joyner 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
776b4a7ce06SEric Joyner 	if (!tx_que) return 0;
777b4a7ce06SEric Joyner 
778b4a7ce06SEric Joyner 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
779b4a7ce06SEric Joyner 	error = sysctl_handle_int(oidp, &val, 0, req);
780b4a7ce06SEric Joyner 	if (error || !req->newptr)
781b4a7ce06SEric Joyner 		return error;
782b4a7ce06SEric Joyner 	return (0);
783b4a7ce06SEric Joyner }
784b4a7ce06SEric Joyner 
785b4a7ce06SEric Joyner /**
786b4a7ce06SEric Joyner  * ixl_sysctl_qrx_tail_handler
787b4a7ce06SEric Joyner  * Retrieves I40E_QRX_TAIL value from hardware
788b4a7ce06SEric Joyner  * for a sysctl.
789b4a7ce06SEric Joyner  */
790b4a7ce06SEric Joyner int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)791b4a7ce06SEric Joyner ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
792b4a7ce06SEric Joyner {
793b4a7ce06SEric Joyner 	struct ixl_rx_queue *rx_que;
794b4a7ce06SEric Joyner 	int error;
795b4a7ce06SEric Joyner 	u32 val;
796b4a7ce06SEric Joyner 
797b4a7ce06SEric Joyner 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
798b4a7ce06SEric Joyner 	if (!rx_que) return 0;
799b4a7ce06SEric Joyner 
800b4a7ce06SEric Joyner 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
801b4a7ce06SEric Joyner 	error = sysctl_handle_int(oidp, &val, 0, req);
802b4a7ce06SEric Joyner 	if (error || !req->newptr)
803b4a7ce06SEric Joyner 		return error;
804b4a7ce06SEric Joyner 	return (0);
805b4a7ce06SEric Joyner }
806b4a7ce06SEric Joyner #endif
807b4a7ce06SEric Joyner 
808b4a7ce06SEric Joyner void
ixl_add_hw_stats(struct ixl_pf * pf)809b4a7ce06SEric Joyner ixl_add_hw_stats(struct ixl_pf *pf)
810b4a7ce06SEric Joyner {
811b4a7ce06SEric Joyner 	struct ixl_vsi *vsi = &pf->vsi;
812b4a7ce06SEric Joyner 	device_t dev = iflib_get_dev(vsi->ctx);
813b4a7ce06SEric Joyner 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
814b4a7ce06SEric Joyner 
815b4a7ce06SEric Joyner 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
816b4a7ce06SEric Joyner 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
817b4a7ce06SEric Joyner 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
818b4a7ce06SEric Joyner 
819b4a7ce06SEric Joyner 	/* Driver statistics */
820b4a7ce06SEric Joyner 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
821b4a7ce06SEric Joyner 			CTLFLAG_RD, &pf->admin_irq,
822b4a7ce06SEric Joyner 			"Admin Queue IRQs received");
823b4a7ce06SEric Joyner 
824b4a7ce06SEric Joyner 	sysctl_ctx_init(&vsi->sysctl_ctx);
825b4a7ce06SEric Joyner 	ixl_vsi_add_sysctls(vsi, "pf", true);
826b4a7ce06SEric Joyner 
827b4a7ce06SEric Joyner 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
828b4a7ce06SEric Joyner }
829b4a7ce06SEric Joyner 
830b4a7ce06SEric Joyner void
ixl_set_rss_hlut(struct ixl_pf * pf)831b4a7ce06SEric Joyner ixl_set_rss_hlut(struct ixl_pf *pf)
832b4a7ce06SEric Joyner {
833b4a7ce06SEric Joyner 	struct i40e_hw	*hw = &pf->hw;
834b4a7ce06SEric Joyner 	struct ixl_vsi *vsi = &pf->vsi;
835b4a7ce06SEric Joyner 	device_t	dev = iflib_get_dev(vsi->ctx);
836b4a7ce06SEric Joyner 	int		i, que_id;
837b4a7ce06SEric Joyner 	int		lut_entry_width;
838b4a7ce06SEric Joyner 	u32		lut = 0;
839b4a7ce06SEric Joyner 	enum i40e_status_code status;
840b4a7ce06SEric Joyner 
841b4a7ce06SEric Joyner 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
842b4a7ce06SEric Joyner 
843b4a7ce06SEric Joyner 	/* Populate the LUT with max no. of queues in round robin fashion */
844b4a7ce06SEric Joyner 	u8 hlut_buf[512];
845b4a7ce06SEric Joyner 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
846b4a7ce06SEric Joyner #ifdef RSS
847b4a7ce06SEric Joyner 		/*
848b4a7ce06SEric Joyner 		 * Fetch the RSS bucket id for the given indirection entry.
849b4a7ce06SEric Joyner 		 * Cap it at the number of configured buckets (which is
850b4a7ce06SEric Joyner 		 * num_queues.)
851b4a7ce06SEric Joyner 		 */
852b4a7ce06SEric Joyner 		que_id = rss_get_indirection_to_bucket(i);
853b4a7ce06SEric Joyner 		que_id = que_id % vsi->num_rx_queues;
854b4a7ce06SEric Joyner #else
855b4a7ce06SEric Joyner 		que_id = i % vsi->num_rx_queues;
856b4a7ce06SEric Joyner #endif
857b4a7ce06SEric Joyner 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
858b4a7ce06SEric Joyner 		hlut_buf[i] = lut;
859b4a7ce06SEric Joyner 	}
860b4a7ce06SEric Joyner 
861b4a7ce06SEric Joyner 	if (hw->mac.type == I40E_MAC_X722) {
862b4a7ce06SEric Joyner 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
863b4a7ce06SEric Joyner 		if (status)
864b4a7ce06SEric Joyner 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
865b4a7ce06SEric Joyner 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
866b4a7ce06SEric Joyner 	} else {
867b4a7ce06SEric Joyner 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
868b4a7ce06SEric Joyner 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
869b4a7ce06SEric Joyner 		ixl_flush(hw);
870b4a7ce06SEric Joyner 	}
871b4a7ce06SEric Joyner }
872b4a7ce06SEric Joyner 
873b4a7ce06SEric Joyner /* For PF VSI only */
874b4a7ce06SEric Joyner int
ixl_enable_rings(struct ixl_vsi * vsi)875b4a7ce06SEric Joyner ixl_enable_rings(struct ixl_vsi *vsi)
876b4a7ce06SEric Joyner {
877b4a7ce06SEric Joyner 	struct ixl_pf	*pf = vsi->back;
878b4a7ce06SEric Joyner 	int		error = 0;
879b4a7ce06SEric Joyner 
880b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_tx_queues; i++)
881b4a7ce06SEric Joyner 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
882b4a7ce06SEric Joyner 
883b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++)
884b4a7ce06SEric Joyner 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
885b4a7ce06SEric Joyner 
886b4a7ce06SEric Joyner 	return (error);
887b4a7ce06SEric Joyner }
888b4a7ce06SEric Joyner 
889b4a7ce06SEric Joyner int
ixl_disable_rings(struct ixl_pf * pf,struct ixl_vsi * vsi,struct ixl_pf_qtag * qtag)890b4a7ce06SEric Joyner ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
891b4a7ce06SEric Joyner {
892b4a7ce06SEric Joyner 	int error = 0;
893b4a7ce06SEric Joyner 
894b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_tx_queues; i++)
895b4a7ce06SEric Joyner 		error = ixl_disable_tx_ring(pf, qtag, i);
896b4a7ce06SEric Joyner 
897b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++)
898b4a7ce06SEric Joyner 		error = ixl_disable_rx_ring(pf, qtag, i);
899b4a7ce06SEric Joyner 
900b4a7ce06SEric Joyner 	return (error);
901b4a7ce06SEric Joyner }
902b4a7ce06SEric Joyner 
903b4a7ce06SEric Joyner void
ixl_enable_intr(struct ixl_vsi * vsi)904b4a7ce06SEric Joyner ixl_enable_intr(struct ixl_vsi *vsi)
905b4a7ce06SEric Joyner {
906b4a7ce06SEric Joyner 	struct i40e_hw		*hw = vsi->hw;
907b4a7ce06SEric Joyner 	struct ixl_rx_queue	*que = vsi->rx_queues;
908b4a7ce06SEric Joyner 
909b4a7ce06SEric Joyner 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
910b4a7ce06SEric Joyner 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
911b4a7ce06SEric Joyner 			ixl_enable_queue(hw, que->rxr.me);
912b4a7ce06SEric Joyner 	} else
913b4a7ce06SEric Joyner 		ixl_enable_intr0(hw);
914b4a7ce06SEric Joyner }
915b4a7ce06SEric Joyner 
916b4a7ce06SEric Joyner void
ixl_disable_rings_intr(struct ixl_vsi * vsi)917b4a7ce06SEric Joyner ixl_disable_rings_intr(struct ixl_vsi *vsi)
918b4a7ce06SEric Joyner {
919b4a7ce06SEric Joyner 	struct i40e_hw		*hw = vsi->hw;
920b4a7ce06SEric Joyner 	struct ixl_rx_queue	*que = vsi->rx_queues;
921b4a7ce06SEric Joyner 
922b4a7ce06SEric Joyner 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
923b4a7ce06SEric Joyner 		ixl_disable_queue(hw, que->rxr.me);
924b4a7ce06SEric Joyner }
925b4a7ce06SEric Joyner 
/*
 * ixl_prepare_for_reset - quiesce the PF ahead of a device reset.
 *
 * Stops the interface if it was up, tears down the HMC backing store,
 * masks the admin/other-cause interrupt (intr0), shuts down the admin
 * queue, and releases the PF's queue-manager allocations.  The ordering
 * here is deliberate: traffic is stopped before HW structures go away.
 *
 * @pf: the PF instance being reset
 * @is_up: true if the interface was running and must be stopped first
 *
 * Returns 0 on success, or the admin queue shutdown error code.
 */
int
ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (is_up)
		ixl_if_stop(pf->vsi.ctx);

	ixl_shutdown_hmc(pf);

	/* Mask the admin/other-cause interrupt */
	ixl_disable_intr0(hw);

	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	/* Give back all queues reserved for the PF LAN VSI */
	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
	return (error);
}
948b4a7ce06SEric Joyner 
/*
 * ixl_rebuild_hw_structs_after_reset - re-initialize PF state after a reset.
 *
 * Mirrors the attach-time bring-up sequence: admin queue, capabilities,
 * HMC, queue reservations, switch configuration, PHY mask, flow control,
 * filters, link status, and interrupt setup.  In firmware recovery mode
 * only the admin queue interrupt path is restored.  Statement order
 * follows hardware initialization requirements and must be preserved.
 *
 * @pf: the PF instance being rebuilt
 * @is_up: true if the interface was running before the reset; if so,
 *         an iflib reset/admin pass is requested at the end
 *
 * Returns 0 on success; on failure, an error code after printing a
 * "reload the driver" message (the device is left unusable).
 */
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	int error = 0;

	device_printf(dev, "Rebuilding driver state...\n");

	/* Setup */
	/* A firmware API version mismatch is tolerated here */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* In recovery mode, only the admin queue path is brought back */
	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		return (0);
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = ixl_setup_hmc(pf);
	if (error)
		goto ixl_rebuild_hw_structs_after_reset_err;

	/* reserve a contiguous allocation for the PF's VSI */
	/* NOTE: this failure is logged but deliberately not treated as fatal */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
		     error);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Restrict which PHY events generate interrupts */
	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	u8 set_fc_err_mask;
	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		error = EIO;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Remove default filters reinstalled by FW on reset */
	ixl_del_default_hw_filters(vsi);

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	/* Determine link state */
	ixl_attach_get_link_status(pf);

	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);

	/* Query device FW LLDP status */
	/* Mirror the firmware LLDP agent state into the driver state flags */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			ixl_set_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		} else {
			ixl_clear_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* If the interface was running, have iflib reset and restart it */
	if (is_up) {
		iflib_request_reset(vsi->ctx);
		iflib_admin_intr_deferred(vsi->ctx);
	}

	device_printf(dev, "Rebuilding driver state done.\n");
	return (0);

ixl_rebuild_hw_structs_after_reset_err:
	device_printf(dev, "Reload the driver to recover\n");
	return (error);
}
1063b4a7ce06SEric Joyner 
1064b4a7ce06SEric Joyner /*
1065b4a7ce06SEric Joyner ** Set flow control using sysctl:
1066b4a7ce06SEric Joyner ** 	0 - off
1067b4a7ce06SEric Joyner **	1 - rx pause
1068b4a7ce06SEric Joyner **	2 - tx pause
1069b4a7ce06SEric Joyner **	3 - full
1070b4a7ce06SEric Joyner */
1071b4a7ce06SEric Joyner int
ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)1072b4a7ce06SEric Joyner ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
1073b4a7ce06SEric Joyner {
1074b4a7ce06SEric Joyner 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1075b4a7ce06SEric Joyner 	struct i40e_hw *hw = &pf->hw;
1076b4a7ce06SEric Joyner 	device_t dev = pf->dev;
1077b4a7ce06SEric Joyner 	int requested_fc, error = 0;
1078b4a7ce06SEric Joyner 	enum i40e_status_code aq_error = 0;
1079b4a7ce06SEric Joyner 	u8 fc_aq_err = 0;
1080b4a7ce06SEric Joyner 
1081b4a7ce06SEric Joyner 	/* Get request */
1082b4a7ce06SEric Joyner 	requested_fc = pf->fc;
1083b4a7ce06SEric Joyner 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
1084b4a7ce06SEric Joyner 	if ((error) || (req->newptr == NULL))
1085b4a7ce06SEric Joyner 		return (error);
1086b4a7ce06SEric Joyner 	if (requested_fc < 0 || requested_fc > 3) {
1087b4a7ce06SEric Joyner 		device_printf(dev,
1088b4a7ce06SEric Joyner 		    "Invalid fc mode; valid modes are 0 through 3\n");
1089b4a7ce06SEric Joyner 		return (EINVAL);
1090b4a7ce06SEric Joyner 	}
1091b4a7ce06SEric Joyner 
1092b4a7ce06SEric Joyner 	/* Set fc ability for port */
1093b4a7ce06SEric Joyner 	hw->fc.requested_mode = requested_fc;
1094b4a7ce06SEric Joyner 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
1095b4a7ce06SEric Joyner 	if (aq_error) {
1096b4a7ce06SEric Joyner 		device_printf(dev,
109720a52706SKrzysztof Galazka 		    "%s: Error setting Flow Control mode %d; fc_err %#x\n",
1098b4a7ce06SEric Joyner 		    __func__, aq_error, fc_aq_err);
1099b4a7ce06SEric Joyner 		return (EIO);
1100b4a7ce06SEric Joyner 	}
1101b4a7ce06SEric Joyner 	pf->fc = requested_fc;
1102b4a7ce06SEric Joyner 
1103b4a7ce06SEric Joyner 	return (0);
1104b4a7ce06SEric Joyner }
1105