/* xref: /freebsd/sys/dev/axgbe/xgbe-drv.c (revision 44b781cfe0b561909686778153915ec2b0ba5a21) */
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);

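/*
 * Channel/ring allocation layout: a single array of
 * max(tx_ring_count, rx_ring_count) channels is allocated, along with
 * separate Tx and Rx ring arrays; the first tx_ring_count channels get a
 * Tx ring and the first rx_ring_count channels get an Rx ring, so a
 * channel may own either ring or both. With per-channel interrupts, the
 * DMA interrupt for channel i lives at platform IRQ resource i + 1
 * (resource 0 is the device interrupt serviced by xgbe_isr()).
 */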
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the DMA interrupt (offset 1) */
			ret = platform_get_irq(pdata->pdev, i + 1);
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "platform_get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}

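/*
 * Ring accounting: ring->cur and ring->dirty are free-running unsigned
 * indices (they are masked to the ring size only when a descriptor is
 * looked up), so (cur - dirty) is the number of in-flight descriptors and
 * stays correct across unsigned wraparound. For example, with
 * rdesc_count = 512, cur = 515 and dirty = 510, 5 descriptors are
 * outstanding and 507 are still available for transmit.
 */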
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

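/*
 * The Rx buffer must hold the MTU plus Ethernet overhead, bounded below
 * by the minimum the hardware accepts and above by one page, then rounded
 * up to the receive buffer alignment. For example, with mtu = 1500 the
 * raw size is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
 * = 1522; assuming a 64-byte XGBE_RX_BUF_ALIGN, (1522 + 63) & ~63 = 1536.
 */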
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}

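/*
 * Interrupt handling follows the usual NAPI pattern: the ISR only
 * acknowledges the hardware, masks further Tx/Rx interrupts, and schedules
 * the poll routine (xgbe_one_poll() or xgbe_all_poll()); the poll routine
 * is then responsible for servicing the rings and re-enabling interrupts.
 */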
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts, so for polling mode we only need to check whether
	 * this register is non-zero.
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}
	}

isr_done:
	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

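/*
 * The per-channel Tx timer acts as a backstop for Tx interrupt coalescing:
 * when it fires it schedules the appropriate NAPI instance (per-channel or
 * shared) exactly as the ISR would, so completed descriptors are still
 * reclaimed if no further Tx interrupt arrives. Clearing tx_timer_active
 * allows the timer to be re-armed.
 */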
static void xgbe_tx_timer(unsigned long data)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)data;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	setup_timer(&pdata->service_timer, xgbe_service_timer,
		    (unsigned long)pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

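/*
 * The device-level interrupt is always requested; per-channel DMA
 * interrupts are requested in addition only when pdata->per_channel_irq
 * is set, each named "<netdev>-TxRx-<queue index>". All IRQs are
 * device-managed (devm_*), so the explicit devm_free_irq() calls below
 * only serve to release them early on the error path and in
 * xgbe_free_irqs().
 */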
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* 'i' is unsigned, so decrementing past zero wraps to UINT_MAX and ends the loop */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

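/*
 * Power transitions can be requested from two contexts: XGMAC_IOCTL_CONTEXT
 * (an external request, rejected with -EINVAL if the device is already in
 * the requested state) and XGMAC_DRIVER_CONTEXT (an internal transition,
 * which additionally detaches or attaches the net_device). Both paths run
 * under pdata->lock with interrupts disabled.
 */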
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	hw_if->init(pdata);

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_phy;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	DBGPR("<--xgbe_start\n");

	return 0;

err_napi:
	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

err_phy:
	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}

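/*
 * Restart is a full stop/start cycle that also discards all Tx/Rx buffer
 * state; it is driven from restart_work, which xgbe_isr() schedules when
 * the hardware reports a Fatal Bus Error.
 */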
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

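/*
 * Deferred Tx timestamp delivery: the ISR stashes the raw hardware
 * timestamp in pdata->tx_tstamp and queues this work item, which converts
 * the cycle count to nanoseconds through the timecounter and reports it
 * against the saved skb. The skb is released unconditionally and the
 * reference cleared under tstamp_lock so a new timestamp request can be
 * accepted.
 */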
985*44b781cfSAndrew Turner static void xgbe_tx_tstamp(struct work_struct *work)
986*44b781cfSAndrew Turner {
987*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = container_of(work,
988*44b781cfSAndrew Turner 						   struct xgbe_prv_data,
989*44b781cfSAndrew Turner 						   tx_tstamp_work);
990*44b781cfSAndrew Turner 	struct skb_shared_hwtstamps hwtstamps;
991*44b781cfSAndrew Turner 	u64 nsec;
992*44b781cfSAndrew Turner 	unsigned long flags;
993*44b781cfSAndrew Turner 
994*44b781cfSAndrew Turner 	if (pdata->tx_tstamp) {
995*44b781cfSAndrew Turner 		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
996*44b781cfSAndrew Turner 					    pdata->tx_tstamp);
997*44b781cfSAndrew Turner 
998*44b781cfSAndrew Turner 		memset(&hwtstamps, 0, sizeof(hwtstamps));
999*44b781cfSAndrew Turner 		hwtstamps.hwtstamp = ns_to_ktime(nsec);
1000*44b781cfSAndrew Turner 		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
1001*44b781cfSAndrew Turner 	}
1002*44b781cfSAndrew Turner 
1003*44b781cfSAndrew Turner 	dev_kfree_skb_any(pdata->tx_tstamp_skb);
1004*44b781cfSAndrew Turner 
1005*44b781cfSAndrew Turner 	spin_lock_irqsave(&pdata->tstamp_lock, flags);
1006*44b781cfSAndrew Turner 	pdata->tx_tstamp_skb = NULL;
1007*44b781cfSAndrew Turner 	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1008*44b781cfSAndrew Turner }
1009*44b781cfSAndrew Turner 
1010*44b781cfSAndrew Turner static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
1011*44b781cfSAndrew Turner 				      struct ifreq *ifreq)
1012*44b781cfSAndrew Turner {
1013*44b781cfSAndrew Turner 	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
1014*44b781cfSAndrew Turner 			 sizeof(pdata->tstamp_config)))
1015*44b781cfSAndrew Turner 		return -EFAULT;
1016*44b781cfSAndrew Turner 
1017*44b781cfSAndrew Turner 	return 0;
1018*44b781cfSAndrew Turner }
1019*44b781cfSAndrew Turner 
1020*44b781cfSAndrew Turner static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
1021*44b781cfSAndrew Turner 				      struct ifreq *ifreq)
1022*44b781cfSAndrew Turner {
1023*44b781cfSAndrew Turner 	struct hwtstamp_config config;
1024*44b781cfSAndrew Turner 	unsigned int mac_tscr;
1025*44b781cfSAndrew Turner 
1026*44b781cfSAndrew Turner 	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
1027*44b781cfSAndrew Turner 		return -EFAULT;
1028*44b781cfSAndrew Turner 
1029*44b781cfSAndrew Turner 	if (config.flags)
1030*44b781cfSAndrew Turner 		return -EINVAL;
1031*44b781cfSAndrew Turner 
1032*44b781cfSAndrew Turner 	mac_tscr = 0;
1033*44b781cfSAndrew Turner 
1034*44b781cfSAndrew Turner 	switch (config.tx_type) {
1035*44b781cfSAndrew Turner 	case HWTSTAMP_TX_OFF:
1036*44b781cfSAndrew Turner 		break;
1037*44b781cfSAndrew Turner 
1038*44b781cfSAndrew Turner 	case HWTSTAMP_TX_ON:
1039*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1040*44b781cfSAndrew Turner 		break;
1041*44b781cfSAndrew Turner 
1042*44b781cfSAndrew Turner 	default:
1043*44b781cfSAndrew Turner 		return -ERANGE;
1044*44b781cfSAndrew Turner 	}
1045*44b781cfSAndrew Turner 
1046*44b781cfSAndrew Turner 	switch (config.rx_filter) {
1047*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_NONE:
1048*44b781cfSAndrew Turner 		break;
1049*44b781cfSAndrew Turner 
1050*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_ALL:
1051*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
1052*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1053*44b781cfSAndrew Turner 		break;
1054*44b781cfSAndrew Turner 
1055*44b781cfSAndrew Turner 	/* PTP v2, UDP, any kind of event packet */
1056*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1057*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
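1057*44b781cfSAndrew Turner 		/* Fall through - also apply the PTP v1 settings below */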
1058*44b781cfSAndrew Turner 	/* PTP v1, UDP, any kind of event packet */
1059*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1060*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1061*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1062*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1063*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1064*44b781cfSAndrew Turner 		break;
1065*44b781cfSAndrew Turner 
1066*44b781cfSAndrew Turner 	/* PTP v2, UDP, Sync packet */
1067*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1068*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
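1068*44b781cfSAndrew Turner 		/* Fall through - also apply the PTP v1 settings below */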
1069*44b781cfSAndrew Turner 	/* PTP v1, UDP, Sync packet */
1070*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1071*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1072*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1073*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1074*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1075*44b781cfSAndrew Turner 		break;
1076*44b781cfSAndrew Turner 
1077*44b781cfSAndrew Turner 	/* PTP v2, UDP, Delay_req packet */
1078*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1079*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
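1079*44b781cfSAndrew Turner 		/* Fall through - also apply the PTP v1 settings below */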
1080*44b781cfSAndrew Turner 	/* PTP v1, UDP, Delay_req packet */
1081*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1082*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1083*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1084*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1085*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1086*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1087*44b781cfSAndrew Turner 		break;
1088*44b781cfSAndrew Turner 
1089*44b781cfSAndrew Turner 	/* 802.1AS, Ethernet, any kind of event packet */
1090*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1091*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1092*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1093*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1094*44b781cfSAndrew Turner 		break;
1095*44b781cfSAndrew Turner 
1096*44b781cfSAndrew Turner 	/* 802.1AS, Ethernet, Sync packet */
1097*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1098*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1099*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1100*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1101*44b781cfSAndrew Turner 		break;
1102*44b781cfSAndrew Turner 
1103*44b781cfSAndrew Turner 	/* 802.1AS, Ethernet, Delay_req packet */
1104*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1105*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1106*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1107*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1108*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1109*44b781cfSAndrew Turner 		break;
1110*44b781cfSAndrew Turner 
1111*44b781cfSAndrew Turner 	/* PTP v2/802.1AS, any layer, any kind of event packet */
1112*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1113*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1114*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1115*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1116*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1117*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1118*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1119*44b781cfSAndrew Turner 		break;
1120*44b781cfSAndrew Turner 
1121*44b781cfSAndrew Turner 	/* PTP v2/802.1AS, any layer, Sync packet */
1122*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1123*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1124*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1125*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1126*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1127*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1128*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1129*44b781cfSAndrew Turner 		break;
1130*44b781cfSAndrew Turner 
1131*44b781cfSAndrew Turner 	/* PTP v2/802.1AS, any layer, Delay_req packet */
1132*44b781cfSAndrew Turner 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1133*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1134*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1135*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1136*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1137*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1138*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1139*44b781cfSAndrew Turner 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1140*44b781cfSAndrew Turner 		break;
1141*44b781cfSAndrew Turner 
1142*44b781cfSAndrew Turner 	default:
1143*44b781cfSAndrew Turner 		return -ERANGE;
1144*44b781cfSAndrew Turner 	}
1145*44b781cfSAndrew Turner 
1146*44b781cfSAndrew Turner 	pdata->hw_if.config_tstamp(pdata, mac_tscr);
1147*44b781cfSAndrew Turner 
1148*44b781cfSAndrew Turner 	memcpy(&pdata->tstamp_config, &config, sizeof(config));
1149*44b781cfSAndrew Turner 
1150*44b781cfSAndrew Turner 	return 0;
1151*44b781cfSAndrew Turner }
1152*44b781cfSAndrew Turner 
1153*44b781cfSAndrew Turner static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
1154*44b781cfSAndrew Turner 				struct sk_buff *skb,
1155*44b781cfSAndrew Turner 				struct xgbe_packet_data *packet)
1156*44b781cfSAndrew Turner {
1157*44b781cfSAndrew Turner 	unsigned long flags;
1158*44b781cfSAndrew Turner 
1159*44b781cfSAndrew Turner 	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
1160*44b781cfSAndrew Turner 		spin_lock_irqsave(&pdata->tstamp_lock, flags);
1161*44b781cfSAndrew Turner 		if (pdata->tx_tstamp_skb) {
1162*44b781cfSAndrew Turner 			/* Another timestamp in progress, ignore this one */
1163*44b781cfSAndrew Turner 			XGMAC_SET_BITS(packet->attributes,
1164*44b781cfSAndrew Turner 				       TX_PACKET_ATTRIBUTES, PTP, 0);
1165*44b781cfSAndrew Turner 		} else {
1166*44b781cfSAndrew Turner 			pdata->tx_tstamp_skb = skb_get(skb);
1167*44b781cfSAndrew Turner 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1168*44b781cfSAndrew Turner 		}
1169*44b781cfSAndrew Turner 		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1170*44b781cfSAndrew Turner 	}
1171*44b781cfSAndrew Turner 
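1171*44b781cfSAndrew Turner 	/* Fall back to a software timestamp when no hardware timestamp
1171*44b781cfSAndrew Turner 	 * was requested, or when another request is still in flight
1171*44b781cfSAndrew Turner 	 */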
1172*44b781cfSAndrew Turner 	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
1173*44b781cfSAndrew Turner 		skb_tx_timestamp(skb);
1174*44b781cfSAndrew Turner }
1175*44b781cfSAndrew Turner 
1176*44b781cfSAndrew Turner static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1177*44b781cfSAndrew Turner {
1178*44b781cfSAndrew Turner 	if (skb_vlan_tag_present(skb))
1179*44b781cfSAndrew Turner 		packet->vlan_ctag = skb_vlan_tag_get(skb);
1180*44b781cfSAndrew Turner }
1181*44b781cfSAndrew Turner 
1182*44b781cfSAndrew Turner static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
1183*44b781cfSAndrew Turner {
1184*44b781cfSAndrew Turner 	int ret;
1185*44b781cfSAndrew Turner 
1186*44b781cfSAndrew Turner 	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1187*44b781cfSAndrew Turner 			    TSO_ENABLE))
1188*44b781cfSAndrew Turner 		return 0;
1189*44b781cfSAndrew Turner 
1190*44b781cfSAndrew Turner 	ret = skb_cow_head(skb, 0);
1191*44b781cfSAndrew Turner 	if (ret)
1192*44b781cfSAndrew Turner 		return ret;
1193*44b781cfSAndrew Turner 
1194*44b781cfSAndrew Turner 	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1195*44b781cfSAndrew Turner 	packet->tcp_header_len = tcp_hdrlen(skb);
1196*44b781cfSAndrew Turner 	packet->tcp_payload_len = skb->len - packet->header_len;
1197*44b781cfSAndrew Turner 	packet->mss = skb_shinfo(skb)->gso_size;
1198*44b781cfSAndrew Turner 	DBGPR("  packet->header_len=%u\n", packet->header_len);
1199*44b781cfSAndrew Turner 	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1200*44b781cfSAndrew Turner 	      packet->tcp_header_len, packet->tcp_payload_len);
1201*44b781cfSAndrew Turner 	DBGPR("  packet->mss=%u\n", packet->mss);
1202*44b781cfSAndrew Turner 
1203*44b781cfSAndrew Turner 	/* Update the number of packets that will ultimately be transmitted,
1204*44b781cfSAndrew Turner 	 * along with the extra bytes added for each extra packet's
1205*44b781cfSAndrew Turner 	 * replicated headers
1206*44b781cfSAndrew Turner 	 */
1206*44b781cfSAndrew Turner 	packet->tx_packets = skb_shinfo(skb)->gso_segs;
1207*44b781cfSAndrew Turner 	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
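1207*44b781cfSAndrew Turner 	/* Illustrative example: a 64KB GSO skb with an MSS of 1448 is
1207*44b781cfSAndrew Turner 	 * cut into 46 segments, so 45 extra header copies are added to
1207*44b781cfSAndrew Turner 	 * the byte count reported to BQL via netdev_tx_sent_queue()
1207*44b781cfSAndrew Turner 	 */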
1208*44b781cfSAndrew Turner 
1209*44b781cfSAndrew Turner 	return 0;
1210*44b781cfSAndrew Turner }
1211*44b781cfSAndrew Turner 
1212*44b781cfSAndrew Turner static int xgbe_is_tso(struct sk_buff *skb)
1213*44b781cfSAndrew Turner {
1214*44b781cfSAndrew Turner 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1215*44b781cfSAndrew Turner 		return 0;
1216*44b781cfSAndrew Turner 
1217*44b781cfSAndrew Turner 	if (!skb_is_gso(skb))
1218*44b781cfSAndrew Turner 		return 0;
1219*44b781cfSAndrew Turner 
1220*44b781cfSAndrew Turner 	DBGPR("  TSO packet to be processed\n");
1221*44b781cfSAndrew Turner 
1222*44b781cfSAndrew Turner 	return 1;
1223*44b781cfSAndrew Turner }
1224*44b781cfSAndrew Turner 
1225*44b781cfSAndrew Turner static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1226*44b781cfSAndrew Turner 			     struct xgbe_ring *ring, struct sk_buff *skb,
1227*44b781cfSAndrew Turner 			     struct xgbe_packet_data *packet)
1228*44b781cfSAndrew Turner {
1229*44b781cfSAndrew Turner 	struct skb_frag_struct *frag;
1230*44b781cfSAndrew Turner 	unsigned int context_desc;
1231*44b781cfSAndrew Turner 	unsigned int len;
1232*44b781cfSAndrew Turner 	unsigned int i;
1233*44b781cfSAndrew Turner 
1234*44b781cfSAndrew Turner 	packet->skb = skb;
1235*44b781cfSAndrew Turner 
1236*44b781cfSAndrew Turner 	context_desc = 0;
1237*44b781cfSAndrew Turner 	packet->rdesc_count = 0;
1238*44b781cfSAndrew Turner 
1239*44b781cfSAndrew Turner 	packet->tx_packets = 1;
1240*44b781cfSAndrew Turner 	packet->tx_bytes = skb->len;
1241*44b781cfSAndrew Turner 
1242*44b781cfSAndrew Turner 	if (xgbe_is_tso(skb)) {
1243*44b781cfSAndrew Turner 		/* TSO requires an extra descriptor if mss is different */
1244*44b781cfSAndrew Turner 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1245*44b781cfSAndrew Turner 			context_desc = 1;
1246*44b781cfSAndrew Turner 			packet->rdesc_count++;
1247*44b781cfSAndrew Turner 		}
1248*44b781cfSAndrew Turner 
1249*44b781cfSAndrew Turner 		/* TSO requires an extra descriptor for TSO header */
1250*44b781cfSAndrew Turner 		packet->rdesc_count++;
1251*44b781cfSAndrew Turner 
1252*44b781cfSAndrew Turner 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1253*44b781cfSAndrew Turner 			       TSO_ENABLE, 1);
1254*44b781cfSAndrew Turner 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1255*44b781cfSAndrew Turner 			       CSUM_ENABLE, 1);
1256*44b781cfSAndrew Turner 	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
1257*44b781cfSAndrew Turner 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1258*44b781cfSAndrew Turner 			       CSUM_ENABLE, 1);
1259*44b781cfSAndrew Turner 
1260*44b781cfSAndrew Turner 	if (skb_vlan_tag_present(skb)) {
1261*44b781cfSAndrew Turner 		/* VLAN requires an extra descriptor if tag is different */
1262*44b781cfSAndrew Turner 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) {
1263*44b781cfSAndrew Turner 			/* We can share with the TSO context descriptor */
1264*44b781cfSAndrew Turner 			if (!context_desc) {
1265*44b781cfSAndrew Turner 				context_desc = 1;
1266*44b781cfSAndrew Turner 				packet->rdesc_count++;
1267*44b781cfSAndrew Turner 			}
1268*44b781cfSAndrew Turner 		}
1268*44b781cfSAndrew Turner 
1269*44b781cfSAndrew Turner 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1270*44b781cfSAndrew Turner 			       VLAN_CTAG, 1);
1271*44b781cfSAndrew Turner 	}
1272*44b781cfSAndrew Turner 
1273*44b781cfSAndrew Turner 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1274*44b781cfSAndrew Turner 	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1275*44b781cfSAndrew Turner 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1276*44b781cfSAndrew Turner 			       PTP, 1);
1277*44b781cfSAndrew Turner 
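1277*44b781cfSAndrew Turner 	/* One descriptor is consumed for every XGBE_TX_MAX_BUF_SIZE
1277*44b781cfSAndrew Turner 	 * chunk of the linear data and of each page fragment
1277*44b781cfSAndrew Turner 	 */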
1278*44b781cfSAndrew Turner 	for (len = skb_headlen(skb); len;) {
1279*44b781cfSAndrew Turner 		packet->rdesc_count++;
1280*44b781cfSAndrew Turner 		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1281*44b781cfSAndrew Turner 	}
1282*44b781cfSAndrew Turner 
1283*44b781cfSAndrew Turner 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1284*44b781cfSAndrew Turner 		frag = &skb_shinfo(skb)->frags[i];
1285*44b781cfSAndrew Turner 		for (len = skb_frag_size(frag); len;) {
1286*44b781cfSAndrew Turner 			packet->rdesc_count++;
1287*44b781cfSAndrew Turner 			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1288*44b781cfSAndrew Turner 		}
1289*44b781cfSAndrew Turner 	}
1290*44b781cfSAndrew Turner }
1291*44b781cfSAndrew Turner 
1292*44b781cfSAndrew Turner static int xgbe_open(struct net_device *netdev)
1293*44b781cfSAndrew Turner {
1294*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1295*44b781cfSAndrew Turner 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1296*44b781cfSAndrew Turner 	int ret;
1297*44b781cfSAndrew Turner 
1298*44b781cfSAndrew Turner 	DBGPR("-->xgbe_open\n");
1299*44b781cfSAndrew Turner 
1300*44b781cfSAndrew Turner 	/* Initialize the phy */
1301*44b781cfSAndrew Turner 	ret = xgbe_phy_init(pdata);
1302*44b781cfSAndrew Turner 	if (ret)
1303*44b781cfSAndrew Turner 		return ret;
1304*44b781cfSAndrew Turner 
1305*44b781cfSAndrew Turner 	/* Enable the clocks */
1306*44b781cfSAndrew Turner 	ret = clk_prepare_enable(pdata->sysclk);
1307*44b781cfSAndrew Turner 	if (ret) {
1308*44b781cfSAndrew Turner 		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1309*44b781cfSAndrew Turner 		return ret;
1310*44b781cfSAndrew Turner 	}
1311*44b781cfSAndrew Turner 
1312*44b781cfSAndrew Turner 	ret = clk_prepare_enable(pdata->ptpclk);
1313*44b781cfSAndrew Turner 	if (ret) {
1314*44b781cfSAndrew Turner 		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1315*44b781cfSAndrew Turner 		goto err_sysclk;
1316*44b781cfSAndrew Turner 	}
1317*44b781cfSAndrew Turner 
1318*44b781cfSAndrew Turner 	/* Calculate the Rx buffer size before allocating rings */
1319*44b781cfSAndrew Turner 	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1320*44b781cfSAndrew Turner 	if (ret < 0)
1321*44b781cfSAndrew Turner 		goto err_ptpclk;
1322*44b781cfSAndrew Turner 	pdata->rx_buf_size = ret;
1323*44b781cfSAndrew Turner 
1324*44b781cfSAndrew Turner 	/* Allocate the channel and ring structures */
1325*44b781cfSAndrew Turner 	ret = xgbe_alloc_channels(pdata);
1326*44b781cfSAndrew Turner 	if (ret)
1327*44b781cfSAndrew Turner 		goto err_ptpclk;
1328*44b781cfSAndrew Turner 
1329*44b781cfSAndrew Turner 	/* Allocate the ring descriptors and buffers */
1330*44b781cfSAndrew Turner 	ret = desc_if->alloc_ring_resources(pdata);
1331*44b781cfSAndrew Turner 	if (ret)
1332*44b781cfSAndrew Turner 		goto err_channels;
1333*44b781cfSAndrew Turner 
1334*44b781cfSAndrew Turner 	INIT_WORK(&pdata->service_work, xgbe_service);
1335*44b781cfSAndrew Turner 	INIT_WORK(&pdata->restart_work, xgbe_restart);
1336*44b781cfSAndrew Turner 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1337*44b781cfSAndrew Turner 	xgbe_init_timers(pdata);
1338*44b781cfSAndrew Turner 
1339*44b781cfSAndrew Turner 	ret = xgbe_start(pdata);
1340*44b781cfSAndrew Turner 	if (ret)
1341*44b781cfSAndrew Turner 		goto err_rings;
1342*44b781cfSAndrew Turner 
1343*44b781cfSAndrew Turner 	clear_bit(XGBE_DOWN, &pdata->dev_state);
1344*44b781cfSAndrew Turner 
1345*44b781cfSAndrew Turner 	DBGPR("<--xgbe_open\n");
1346*44b781cfSAndrew Turner 
1347*44b781cfSAndrew Turner 	return 0;
1348*44b781cfSAndrew Turner 
1349*44b781cfSAndrew Turner err_rings:
1350*44b781cfSAndrew Turner 	desc_if->free_ring_resources(pdata);
1351*44b781cfSAndrew Turner 
1352*44b781cfSAndrew Turner err_channels:
1353*44b781cfSAndrew Turner 	xgbe_free_channels(pdata);
1354*44b781cfSAndrew Turner 
1355*44b781cfSAndrew Turner err_ptpclk:
1356*44b781cfSAndrew Turner 	clk_disable_unprepare(pdata->ptpclk);
1357*44b781cfSAndrew Turner 
1358*44b781cfSAndrew Turner err_sysclk:
1359*44b781cfSAndrew Turner 	clk_disable_unprepare(pdata->sysclk);
1360*44b781cfSAndrew Turner 
1361*44b781cfSAndrew Turner 	return ret;
1362*44b781cfSAndrew Turner }
1363*44b781cfSAndrew Turner 
1364*44b781cfSAndrew Turner static int xgbe_close(struct net_device *netdev)
1365*44b781cfSAndrew Turner {
1366*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1367*44b781cfSAndrew Turner 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1368*44b781cfSAndrew Turner 
1369*44b781cfSAndrew Turner 	DBGPR("-->xgbe_close\n");
1370*44b781cfSAndrew Turner 
1371*44b781cfSAndrew Turner 	/* Stop the device */
1372*44b781cfSAndrew Turner 	xgbe_stop(pdata);
1373*44b781cfSAndrew Turner 
1374*44b781cfSAndrew Turner 	/* Free the ring descriptors and buffers */
1375*44b781cfSAndrew Turner 	desc_if->free_ring_resources(pdata);
1376*44b781cfSAndrew Turner 
1377*44b781cfSAndrew Turner 	/* Free the channel and ring structures */
1378*44b781cfSAndrew Turner 	xgbe_free_channels(pdata);
1379*44b781cfSAndrew Turner 
1380*44b781cfSAndrew Turner 	/* Disable the clocks */
1381*44b781cfSAndrew Turner 	clk_disable_unprepare(pdata->ptpclk);
1382*44b781cfSAndrew Turner 	clk_disable_unprepare(pdata->sysclk);
1383*44b781cfSAndrew Turner 
1384*44b781cfSAndrew Turner 	set_bit(XGBE_DOWN, &pdata->dev_state);
1385*44b781cfSAndrew Turner 
1386*44b781cfSAndrew Turner 	DBGPR("<--xgbe_close\n");
1387*44b781cfSAndrew Turner 
1388*44b781cfSAndrew Turner 	return 0;
1389*44b781cfSAndrew Turner }
1390*44b781cfSAndrew Turner 
1391*44b781cfSAndrew Turner static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1392*44b781cfSAndrew Turner {
1393*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1394*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1395*44b781cfSAndrew Turner 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1396*44b781cfSAndrew Turner 	struct xgbe_channel *channel;
1397*44b781cfSAndrew Turner 	struct xgbe_ring *ring;
1398*44b781cfSAndrew Turner 	struct xgbe_packet_data *packet;
1399*44b781cfSAndrew Turner 	struct netdev_queue *txq;
1400*44b781cfSAndrew Turner 	int ret;
1401*44b781cfSAndrew Turner 
1402*44b781cfSAndrew Turner 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1403*44b781cfSAndrew Turner 
1404*44b781cfSAndrew Turner 	channel = pdata->channel + skb->queue_mapping;
1405*44b781cfSAndrew Turner 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1406*44b781cfSAndrew Turner 	ring = channel->tx_ring;
1407*44b781cfSAndrew Turner 	packet = &ring->packet_data;
1408*44b781cfSAndrew Turner 
1409*44b781cfSAndrew Turner 	ret = NETDEV_TX_OK;
1410*44b781cfSAndrew Turner 
1411*44b781cfSAndrew Turner 	if (skb->len == 0) {
1412*44b781cfSAndrew Turner 		netif_err(pdata, tx_err, netdev,
1413*44b781cfSAndrew Turner 			  "empty skb received from stack\n");
1414*44b781cfSAndrew Turner 		dev_kfree_skb_any(skb);
1415*44b781cfSAndrew Turner 		goto tx_netdev_return;
1416*44b781cfSAndrew Turner 	}
1417*44b781cfSAndrew Turner 
1418*44b781cfSAndrew Turner 	/* Calculate preliminary packet info */
1419*44b781cfSAndrew Turner 	memset(packet, 0, sizeof(*packet));
1420*44b781cfSAndrew Turner 	xgbe_packet_info(pdata, ring, skb, packet);
1421*44b781cfSAndrew Turner 
1422*44b781cfSAndrew Turner 	/* Check that there are enough descriptors available */
1423*44b781cfSAndrew Turner 	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1424*44b781cfSAndrew Turner 	if (ret)
1425*44b781cfSAndrew Turner 		goto tx_netdev_return;
1426*44b781cfSAndrew Turner 
1427*44b781cfSAndrew Turner 	ret = xgbe_prep_tso(skb, packet);
1428*44b781cfSAndrew Turner 	if (ret) {
1429*44b781cfSAndrew Turner 		netif_err(pdata, tx_err, netdev,
1430*44b781cfSAndrew Turner 			  "error processing TSO packet\n");
1431*44b781cfSAndrew Turner 		dev_kfree_skb_any(skb);
1432*44b781cfSAndrew Turner 		goto tx_netdev_return;
1433*44b781cfSAndrew Turner 	}
1434*44b781cfSAndrew Turner 	xgbe_prep_vlan(skb, packet);
1435*44b781cfSAndrew Turner 
1436*44b781cfSAndrew Turner 	if (!desc_if->map_tx_skb(channel, skb)) {
1437*44b781cfSAndrew Turner 		dev_kfree_skb_any(skb);
1438*44b781cfSAndrew Turner 		goto tx_netdev_return;
1439*44b781cfSAndrew Turner 	}
1440*44b781cfSAndrew Turner 
1441*44b781cfSAndrew Turner 	xgbe_prep_tx_tstamp(pdata, skb, packet);
1442*44b781cfSAndrew Turner 
1443*44b781cfSAndrew Turner 	/* Report on the actual number of bytes (to be) sent */
1444*44b781cfSAndrew Turner 	netdev_tx_sent_queue(txq, packet->tx_bytes);
1445*44b781cfSAndrew Turner 
1446*44b781cfSAndrew Turner 	/* Configure required descriptor fields for transmission */
1447*44b781cfSAndrew Turner 	hw_if->dev_xmit(channel);
1448*44b781cfSAndrew Turner 
1449*44b781cfSAndrew Turner 	if (netif_msg_pktdata(pdata))
1450*44b781cfSAndrew Turner 		xgbe_print_pkt(netdev, skb, true);
1451*44b781cfSAndrew Turner 
1452*44b781cfSAndrew Turner 	/* Stop the queue in advance if there may not be enough descriptors */
1453*44b781cfSAndrew Turner 	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1454*44b781cfSAndrew Turner 
1455*44b781cfSAndrew Turner 	ret = NETDEV_TX_OK;
1456*44b781cfSAndrew Turner 
1457*44b781cfSAndrew Turner tx_netdev_return:
1458*44b781cfSAndrew Turner 	return ret;
1459*44b781cfSAndrew Turner }
1460*44b781cfSAndrew Turner 
1461*44b781cfSAndrew Turner static void xgbe_set_rx_mode(struct net_device *netdev)
1462*44b781cfSAndrew Turner {
1463*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1464*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1465*44b781cfSAndrew Turner 
1466*44b781cfSAndrew Turner 	DBGPR("-->xgbe_set_rx_mode\n");
1467*44b781cfSAndrew Turner 
1468*44b781cfSAndrew Turner 	hw_if->config_rx_mode(pdata);
1469*44b781cfSAndrew Turner 
1470*44b781cfSAndrew Turner 	DBGPR("<--xgbe_set_rx_mode\n");
1471*44b781cfSAndrew Turner }
1472*44b781cfSAndrew Turner 
1473*44b781cfSAndrew Turner static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1474*44b781cfSAndrew Turner {
1475*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1476*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1477*44b781cfSAndrew Turner 	struct sockaddr *saddr = addr;
1478*44b781cfSAndrew Turner 
1479*44b781cfSAndrew Turner 	DBGPR("-->xgbe_set_mac_address\n");
1480*44b781cfSAndrew Turner 
1481*44b781cfSAndrew Turner 	if (!is_valid_ether_addr(saddr->sa_data))
1482*44b781cfSAndrew Turner 		return -EADDRNOTAVAIL;
1483*44b781cfSAndrew Turner 
1484*44b781cfSAndrew Turner 	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1485*44b781cfSAndrew Turner 
1486*44b781cfSAndrew Turner 	hw_if->set_mac_address(pdata, netdev->dev_addr);
1487*44b781cfSAndrew Turner 
1488*44b781cfSAndrew Turner 	DBGPR("<--xgbe_set_mac_address\n");
1489*44b781cfSAndrew Turner 
1490*44b781cfSAndrew Turner 	return 0;
1491*44b781cfSAndrew Turner }
1492*44b781cfSAndrew Turner 
1493*44b781cfSAndrew Turner static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1494*44b781cfSAndrew Turner {
1495*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1496*44b781cfSAndrew Turner 	int ret;
1497*44b781cfSAndrew Turner 
1498*44b781cfSAndrew Turner 	switch (cmd) {
1499*44b781cfSAndrew Turner 	case SIOCGHWTSTAMP:
1500*44b781cfSAndrew Turner 		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1501*44b781cfSAndrew Turner 		break;
1502*44b781cfSAndrew Turner 
1503*44b781cfSAndrew Turner 	case SIOCSHWTSTAMP:
1504*44b781cfSAndrew Turner 		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1505*44b781cfSAndrew Turner 		break;
1506*44b781cfSAndrew Turner 
1507*44b781cfSAndrew Turner 	default:
1508*44b781cfSAndrew Turner 		ret = -EOPNOTSUPP;
1509*44b781cfSAndrew Turner 	}
1510*44b781cfSAndrew Turner 
1511*44b781cfSAndrew Turner 	return ret;
1512*44b781cfSAndrew Turner }
1513*44b781cfSAndrew Turner 
1514*44b781cfSAndrew Turner static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1515*44b781cfSAndrew Turner {
1516*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1517*44b781cfSAndrew Turner 	int ret;
1518*44b781cfSAndrew Turner 
1519*44b781cfSAndrew Turner 	DBGPR("-->xgbe_change_mtu\n");
1520*44b781cfSAndrew Turner 
1521*44b781cfSAndrew Turner 	ret = xgbe_calc_rx_buf_size(netdev, mtu);
1522*44b781cfSAndrew Turner 	if (ret < 0)
1523*44b781cfSAndrew Turner 		return ret;
1524*44b781cfSAndrew Turner 
1525*44b781cfSAndrew Turner 	pdata->rx_buf_size = ret;
1526*44b781cfSAndrew Turner 	netdev->mtu = mtu;
1527*44b781cfSAndrew Turner 
1528*44b781cfSAndrew Turner 	xgbe_restart_dev(pdata);
1529*44b781cfSAndrew Turner 
1530*44b781cfSAndrew Turner 	DBGPR("<--xgbe_change_mtu\n");
1531*44b781cfSAndrew Turner 
1532*44b781cfSAndrew Turner 	return 0;
1533*44b781cfSAndrew Turner }
1534*44b781cfSAndrew Turner 
1535*44b781cfSAndrew Turner static void xgbe_tx_timeout(struct net_device *netdev)
1536*44b781cfSAndrew Turner {
1537*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1538*44b781cfSAndrew Turner 
1539*44b781cfSAndrew Turner 	netdev_warn(netdev, "tx timeout, device restarting\n");
1540*44b781cfSAndrew Turner 	schedule_work(&pdata->restart_work);
1541*44b781cfSAndrew Turner }
1542*44b781cfSAndrew Turner 
1543*44b781cfSAndrew Turner static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1544*44b781cfSAndrew Turner 						  struct rtnl_link_stats64 *s)
1545*44b781cfSAndrew Turner {
1546*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1547*44b781cfSAndrew Turner 	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1548*44b781cfSAndrew Turner 
1549*44b781cfSAndrew Turner 	DBGPR("-->%s\n", __func__);
1550*44b781cfSAndrew Turner 
1551*44b781cfSAndrew Turner 	pdata->hw_if.read_mmc_stats(pdata);
1552*44b781cfSAndrew Turner 
1553*44b781cfSAndrew Turner 	s->rx_packets = pstats->rxframecount_gb;
1554*44b781cfSAndrew Turner 	s->rx_bytes = pstats->rxoctetcount_gb;
1555*44b781cfSAndrew Turner 	s->rx_errors = pstats->rxframecount_gb -
1556*44b781cfSAndrew Turner 		       pstats->rxbroadcastframes_g -
1557*44b781cfSAndrew Turner 		       pstats->rxmulticastframes_g -
1558*44b781cfSAndrew Turner 		       pstats->rxunicastframes_g;
1559*44b781cfSAndrew Turner 	s->multicast = pstats->rxmulticastframes_g;
1560*44b781cfSAndrew Turner 	s->rx_length_errors = pstats->rxlengtherror;
1561*44b781cfSAndrew Turner 	s->rx_crc_errors = pstats->rxcrcerror;
1562*44b781cfSAndrew Turner 	s->rx_fifo_errors = pstats->rxfifooverflow;
1563*44b781cfSAndrew Turner 
1564*44b781cfSAndrew Turner 	s->tx_packets = pstats->txframecount_gb;
1565*44b781cfSAndrew Turner 	s->tx_bytes = pstats->txoctetcount_gb;
1566*44b781cfSAndrew Turner 	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1567*44b781cfSAndrew Turner 	s->tx_dropped = netdev->stats.tx_dropped;
1568*44b781cfSAndrew Turner 
1569*44b781cfSAndrew Turner 	DBGPR("<--%s\n", __func__);
1570*44b781cfSAndrew Turner 
1571*44b781cfSAndrew Turner 	return s;
1572*44b781cfSAndrew Turner }
1573*44b781cfSAndrew Turner 
1574*44b781cfSAndrew Turner static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1575*44b781cfSAndrew Turner 				u16 vid)
1576*44b781cfSAndrew Turner {
1577*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1578*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1579*44b781cfSAndrew Turner 
1580*44b781cfSAndrew Turner 	DBGPR("-->%s\n", __func__);
1581*44b781cfSAndrew Turner 
1582*44b781cfSAndrew Turner 	set_bit(vid, pdata->active_vlans);
1583*44b781cfSAndrew Turner 	hw_if->update_vlan_hash_table(pdata);
1584*44b781cfSAndrew Turner 
1585*44b781cfSAndrew Turner 	DBGPR("<--%s\n", __func__);
1586*44b781cfSAndrew Turner 
1587*44b781cfSAndrew Turner 	return 0;
1588*44b781cfSAndrew Turner }
1589*44b781cfSAndrew Turner 
1590*44b781cfSAndrew Turner static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1591*44b781cfSAndrew Turner 				 u16 vid)
1592*44b781cfSAndrew Turner {
1593*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1594*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1595*44b781cfSAndrew Turner 
1596*44b781cfSAndrew Turner 	DBGPR("-->%s\n", __func__);
1597*44b781cfSAndrew Turner 
1598*44b781cfSAndrew Turner 	clear_bit(vid, pdata->active_vlans);
1599*44b781cfSAndrew Turner 	hw_if->update_vlan_hash_table(pdata);
1600*44b781cfSAndrew Turner 
1601*44b781cfSAndrew Turner 	DBGPR("<--%s\n", __func__);
1602*44b781cfSAndrew Turner 
1603*44b781cfSAndrew Turner 	return 0;
1604*44b781cfSAndrew Turner }
1605*44b781cfSAndrew Turner 
1606*44b781cfSAndrew Turner #ifdef CONFIG_NET_POLL_CONTROLLER
1607*44b781cfSAndrew Turner static void xgbe_poll_controller(struct net_device *netdev)
1608*44b781cfSAndrew Turner {
1609*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1610*44b781cfSAndrew Turner 	struct xgbe_channel *channel;
1611*44b781cfSAndrew Turner 	unsigned int i;
1612*44b781cfSAndrew Turner 
1613*44b781cfSAndrew Turner 	DBGPR("-->xgbe_poll_controller\n");
1614*44b781cfSAndrew Turner 
1615*44b781cfSAndrew Turner 	if (pdata->per_channel_irq) {
1616*44b781cfSAndrew Turner 		channel = pdata->channel;
1617*44b781cfSAndrew Turner 		for (i = 0; i < pdata->channel_count; i++, channel++)
1618*44b781cfSAndrew Turner 			xgbe_dma_isr(channel->dma_irq, channel);
1619*44b781cfSAndrew Turner 	} else {
1620*44b781cfSAndrew Turner 		disable_irq(pdata->dev_irq);
1621*44b781cfSAndrew Turner 		xgbe_isr(pdata->dev_irq, pdata);
1622*44b781cfSAndrew Turner 		enable_irq(pdata->dev_irq);
1623*44b781cfSAndrew Turner 	}
1624*44b781cfSAndrew Turner 
1625*44b781cfSAndrew Turner 	DBGPR("<--xgbe_poll_controller\n");
1626*44b781cfSAndrew Turner }
1627*44b781cfSAndrew Turner #endif /* End CONFIG_NET_POLL_CONTROLLER */
1628*44b781cfSAndrew Turner 
1629*44b781cfSAndrew Turner static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
1630*44b781cfSAndrew Turner 			 struct tc_to_netdev *tc_to_netdev)
1631*44b781cfSAndrew Turner {
1632*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1633*44b781cfSAndrew Turner 	u8 tc;
1634*44b781cfSAndrew Turner 
1635*44b781cfSAndrew Turner 	if (tc_to_netdev->type != TC_SETUP_MQPRIO)
1636*44b781cfSAndrew Turner 		return -EINVAL;
1637*44b781cfSAndrew Turner 
1638*44b781cfSAndrew Turner 	tc = tc_to_netdev->tc;
1639*44b781cfSAndrew Turner 
1640*44b781cfSAndrew Turner 	if (tc > pdata->hw_feat.tc_cnt)
1641*44b781cfSAndrew Turner 		return -EINVAL;
1642*44b781cfSAndrew Turner 
1643*44b781cfSAndrew Turner 	pdata->num_tcs = tc;
1644*44b781cfSAndrew Turner 	pdata->hw_if.config_tc(pdata);
1645*44b781cfSAndrew Turner 
1646*44b781cfSAndrew Turner 	return 0;
1647*44b781cfSAndrew Turner }
1648*44b781cfSAndrew Turner 
1649*44b781cfSAndrew Turner static int xgbe_set_features(struct net_device *netdev,
1650*44b781cfSAndrew Turner 			     netdev_features_t features)
1651*44b781cfSAndrew Turner {
1652*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1653*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1654*44b781cfSAndrew Turner 	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1655*44b781cfSAndrew Turner 	int ret = 0;
1656*44b781cfSAndrew Turner 
1657*44b781cfSAndrew Turner 	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1658*44b781cfSAndrew Turner 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1659*44b781cfSAndrew Turner 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1660*44b781cfSAndrew Turner 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1661*44b781cfSAndrew Turner 
1662*44b781cfSAndrew Turner 	if ((features & NETIF_F_RXHASH) && !rxhash)
1663*44b781cfSAndrew Turner 		ret = hw_if->enable_rss(pdata);
1664*44b781cfSAndrew Turner 	else if (!(features & NETIF_F_RXHASH) && rxhash)
1665*44b781cfSAndrew Turner 		ret = hw_if->disable_rss(pdata);
1666*44b781cfSAndrew Turner 	if (ret)
1667*44b781cfSAndrew Turner 		return ret;
1668*44b781cfSAndrew Turner 
1669*44b781cfSAndrew Turner 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
1670*44b781cfSAndrew Turner 		hw_if->enable_rx_csum(pdata);
1671*44b781cfSAndrew Turner 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1672*44b781cfSAndrew Turner 		hw_if->disable_rx_csum(pdata);
1673*44b781cfSAndrew Turner 
1674*44b781cfSAndrew Turner 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1675*44b781cfSAndrew Turner 		hw_if->enable_rx_vlan_stripping(pdata);
1676*44b781cfSAndrew Turner 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1677*44b781cfSAndrew Turner 		hw_if->disable_rx_vlan_stripping(pdata);
1678*44b781cfSAndrew Turner 
1679*44b781cfSAndrew Turner 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1680*44b781cfSAndrew Turner 		hw_if->enable_rx_vlan_filtering(pdata);
1681*44b781cfSAndrew Turner 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1682*44b781cfSAndrew Turner 		hw_if->disable_rx_vlan_filtering(pdata);
1683*44b781cfSAndrew Turner 
1684*44b781cfSAndrew Turner 	pdata->netdev_features = features;
1685*44b781cfSAndrew Turner 
1686*44b781cfSAndrew Turner 	DBGPR("<--xgbe_set_features\n");
1687*44b781cfSAndrew Turner 
1688*44b781cfSAndrew Turner 	return 0;
1689*44b781cfSAndrew Turner }
1690*44b781cfSAndrew Turner 
1691*44b781cfSAndrew Turner static const struct net_device_ops xgbe_netdev_ops = {
1692*44b781cfSAndrew Turner 	.ndo_open		= xgbe_open,
1693*44b781cfSAndrew Turner 	.ndo_stop		= xgbe_close,
1694*44b781cfSAndrew Turner 	.ndo_start_xmit		= xgbe_xmit,
1695*44b781cfSAndrew Turner 	.ndo_set_rx_mode	= xgbe_set_rx_mode,
1696*44b781cfSAndrew Turner 	.ndo_set_mac_address	= xgbe_set_mac_address,
1697*44b781cfSAndrew Turner 	.ndo_validate_addr	= eth_validate_addr,
1698*44b781cfSAndrew Turner 	.ndo_do_ioctl		= xgbe_ioctl,
1699*44b781cfSAndrew Turner 	.ndo_change_mtu		= xgbe_change_mtu,
1700*44b781cfSAndrew Turner 	.ndo_tx_timeout		= xgbe_tx_timeout,
1701*44b781cfSAndrew Turner 	.ndo_get_stats64	= xgbe_get_stats64,
1702*44b781cfSAndrew Turner 	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
1703*44b781cfSAndrew Turner 	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
1704*44b781cfSAndrew Turner #ifdef CONFIG_NET_POLL_CONTROLLER
1705*44b781cfSAndrew Turner 	.ndo_poll_controller	= xgbe_poll_controller,
1706*44b781cfSAndrew Turner #endif
1707*44b781cfSAndrew Turner 	.ndo_setup_tc		= xgbe_setup_tc,
1708*44b781cfSAndrew Turner 	.ndo_set_features	= xgbe_set_features,
1709*44b781cfSAndrew Turner };
1710*44b781cfSAndrew Turner 
1711*44b781cfSAndrew Turner struct net_device_ops *xgbe_get_netdev_ops(void)
1712*44b781cfSAndrew Turner {
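1712*44b781cfSAndrew Turner 	/* Cast away the const qualifier, presumably so the caller can
1712*44b781cfSAndrew Turner 	 * store the pointer in a non-const field; the ops table itself
1712*44b781cfSAndrew Turner 	 * must not be modified
1712*44b781cfSAndrew Turner 	 */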
1713*44b781cfSAndrew Turner 	return (struct net_device_ops *)&xgbe_netdev_ops;
1714*44b781cfSAndrew Turner }
1715*44b781cfSAndrew Turner 
1716*44b781cfSAndrew Turner static void xgbe_rx_refresh(struct xgbe_channel *channel)
1717*44b781cfSAndrew Turner {
1718*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = channel->pdata;
1719*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1720*44b781cfSAndrew Turner 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1721*44b781cfSAndrew Turner 	struct xgbe_ring *ring = channel->rx_ring;
1722*44b781cfSAndrew Turner 	struct xgbe_ring_data *rdata;
1723*44b781cfSAndrew Turner 
1724*44b781cfSAndrew Turner 	while (ring->dirty != ring->cur) {
1725*44b781cfSAndrew Turner 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1726*44b781cfSAndrew Turner 
1727*44b781cfSAndrew Turner 		/* Reset rdata values */
1728*44b781cfSAndrew Turner 		desc_if->unmap_rdata(pdata, rdata);
1729*44b781cfSAndrew Turner 
1730*44b781cfSAndrew Turner 		if (desc_if->map_rx_buffer(pdata, ring, rdata))
1731*44b781cfSAndrew Turner 			break;
1732*44b781cfSAndrew Turner 
1733*44b781cfSAndrew Turner 		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
1734*44b781cfSAndrew Turner 
1735*44b781cfSAndrew Turner 		ring->dirty++;
1736*44b781cfSAndrew Turner 	}
1737*44b781cfSAndrew Turner 
1738*44b781cfSAndrew Turner 	/* Make sure everything is written before the register write */
1739*44b781cfSAndrew Turner 	wmb();
1740*44b781cfSAndrew Turner 
1741*44b781cfSAndrew Turner 	/* Update the Rx Tail Pointer Register with the address of
1742*44b781cfSAndrew Turner 	 * the last cleaned entry */
1743*44b781cfSAndrew Turner 	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
1744*44b781cfSAndrew Turner 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1745*44b781cfSAndrew Turner 			  lower_32_bits(rdata->rdesc_dma));
1746*44b781cfSAndrew Turner }
1747*44b781cfSAndrew Turner 
1748*44b781cfSAndrew Turner static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1749*44b781cfSAndrew Turner 				       struct napi_struct *napi,
1750*44b781cfSAndrew Turner 				       struct xgbe_ring_data *rdata,
1751*44b781cfSAndrew Turner 				       unsigned int len)
1752*44b781cfSAndrew Turner {
1753*44b781cfSAndrew Turner 	struct sk_buff *skb;
1754*44b781cfSAndrew Turner 	u8 *packet;
1755*44b781cfSAndrew Turner 	unsigned int copy_len;
1756*44b781cfSAndrew Turner 
1757*44b781cfSAndrew Turner 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1758*44b781cfSAndrew Turner 	if (!skb)
1759*44b781cfSAndrew Turner 		return NULL;
1760*44b781cfSAndrew Turner 
1761*44b781cfSAndrew Turner 	/* Start with the header buffer which may contain just the header
1762*44b781cfSAndrew Turner 	 * or the header plus data
1763*44b781cfSAndrew Turner 	 */
1764*44b781cfSAndrew Turner 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
1765*44b781cfSAndrew Turner 				      rdata->rx.hdr.dma_off,
1766*44b781cfSAndrew Turner 				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
1767*44b781cfSAndrew Turner 
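1767*44b781cfSAndrew Turner 	/* If the hardware split the header, rx.hdr_len holds its length;
1767*44b781cfSAndrew Turner 	 * otherwise copy up to a full header buffer's worth of the
1767*44b781cfSAndrew Turner 	 * packet into the linear area
1767*44b781cfSAndrew Turner 	 */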
1768*44b781cfSAndrew Turner 	packet = page_address(rdata->rx.hdr.pa.pages) +
1769*44b781cfSAndrew Turner 		 rdata->rx.hdr.pa.pages_offset;
1770*44b781cfSAndrew Turner 	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
1771*44b781cfSAndrew Turner 	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
1772*44b781cfSAndrew Turner 	skb_copy_to_linear_data(skb, packet, copy_len);
1773*44b781cfSAndrew Turner 	skb_put(skb, copy_len);
1774*44b781cfSAndrew Turner 
1775*44b781cfSAndrew Turner 	len -= copy_len;
1776*44b781cfSAndrew Turner 	if (len) {
1777*44b781cfSAndrew Turner 		/* Add the remaining data as a frag */
1778*44b781cfSAndrew Turner 		dma_sync_single_range_for_cpu(pdata->dev,
1779*44b781cfSAndrew Turner 					      rdata->rx.buf.dma_base,
1780*44b781cfSAndrew Turner 					      rdata->rx.buf.dma_off,
1781*44b781cfSAndrew Turner 					      rdata->rx.buf.dma_len,
1782*44b781cfSAndrew Turner 					      DMA_FROM_DEVICE);
1783*44b781cfSAndrew Turner 
1784*44b781cfSAndrew Turner 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1785*44b781cfSAndrew Turner 				rdata->rx.buf.pa.pages,
1786*44b781cfSAndrew Turner 				rdata->rx.buf.pa.pages_offset,
1787*44b781cfSAndrew Turner 				len, rdata->rx.buf.dma_len);
1788*44b781cfSAndrew Turner 		rdata->rx.buf.pa.pages = NULL;
1789*44b781cfSAndrew Turner 	}
1790*44b781cfSAndrew Turner 
1791*44b781cfSAndrew Turner 	return skb;
1792*44b781cfSAndrew Turner }
1793*44b781cfSAndrew Turner 
1794*44b781cfSAndrew Turner static int xgbe_tx_poll(struct xgbe_channel *channel)
1795*44b781cfSAndrew Turner {
1796*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = channel->pdata;
1797*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1798*44b781cfSAndrew Turner 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1799*44b781cfSAndrew Turner 	struct xgbe_ring *ring = channel->tx_ring;
1800*44b781cfSAndrew Turner 	struct xgbe_ring_data *rdata;
1801*44b781cfSAndrew Turner 	struct xgbe_ring_desc *rdesc;
1802*44b781cfSAndrew Turner 	struct net_device *netdev = pdata->netdev;
1803*44b781cfSAndrew Turner 	struct netdev_queue *txq;
1804*44b781cfSAndrew Turner 	int processed = 0;
1805*44b781cfSAndrew Turner 	unsigned int tx_packets = 0, tx_bytes = 0;
1806*44b781cfSAndrew Turner 	unsigned int cur;
1807*44b781cfSAndrew Turner 
1808*44b781cfSAndrew Turner 	DBGPR("-->xgbe_tx_poll\n");
1809*44b781cfSAndrew Turner 
1810*44b781cfSAndrew Turner 	/* Nothing to do if there isn't a Tx ring for this channel */
1811*44b781cfSAndrew Turner 	if (!ring)
1812*44b781cfSAndrew Turner 		return 0;
1813*44b781cfSAndrew Turner 
1814*44b781cfSAndrew Turner 	cur = ring->cur;
1815*44b781cfSAndrew Turner 
1816*44b781cfSAndrew Turner 	/* Be sure we get ring->cur before accessing descriptor data */
1817*44b781cfSAndrew Turner 	smp_rmb();
1818*44b781cfSAndrew Turner 
1819*44b781cfSAndrew Turner 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1820*44b781cfSAndrew Turner 
1821*44b781cfSAndrew Turner 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1822*44b781cfSAndrew Turner 	       (ring->dirty != cur)) {
1823*44b781cfSAndrew Turner 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1824*44b781cfSAndrew Turner 		rdesc = rdata->rdesc;
1825*44b781cfSAndrew Turner 
1826*44b781cfSAndrew Turner 		if (!hw_if->tx_complete(rdesc))
1827*44b781cfSAndrew Turner 			break;
1828*44b781cfSAndrew Turner 
1829*44b781cfSAndrew Turner 		/* Make sure descriptor fields are read after reading the OWN
1830*44b781cfSAndrew Turner 		 * bit */
1831*44b781cfSAndrew Turner 		dma_rmb();
1832*44b781cfSAndrew Turner 
1833*44b781cfSAndrew Turner 		if (netif_msg_tx_done(pdata))
1834*44b781cfSAndrew Turner 			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
1835*44b781cfSAndrew Turner 
1836*44b781cfSAndrew Turner 		if (hw_if->is_last_desc(rdesc)) {
1837*44b781cfSAndrew Turner 			tx_packets += rdata->tx.packets;
1838*44b781cfSAndrew Turner 			tx_bytes += rdata->tx.bytes;
1839*44b781cfSAndrew Turner 		}
1840*44b781cfSAndrew Turner 
1841*44b781cfSAndrew Turner 		/* Free the SKB and reset the descriptor for re-use */
1842*44b781cfSAndrew Turner 		desc_if->unmap_rdata(pdata, rdata);
1843*44b781cfSAndrew Turner 		hw_if->tx_desc_reset(rdata);
1844*44b781cfSAndrew Turner 
1845*44b781cfSAndrew Turner 		processed++;
1846*44b781cfSAndrew Turner 		ring->dirty++;
1847*44b781cfSAndrew Turner 	}
1848*44b781cfSAndrew Turner 
1849*44b781cfSAndrew Turner 	if (!processed)
1850*44b781cfSAndrew Turner 		return 0;
1851*44b781cfSAndrew Turner 
1852*44b781cfSAndrew Turner 	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1853*44b781cfSAndrew Turner 
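1853*44b781cfSAndrew Turner 	/* Wake the queue once enough descriptors are free again */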
1854*44b781cfSAndrew Turner 	if ((ring->tx.queue_stopped == 1) &&
1855*44b781cfSAndrew Turner 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
1856*44b781cfSAndrew Turner 		ring->tx.queue_stopped = 0;
1857*44b781cfSAndrew Turner 		netif_tx_wake_queue(txq);
1858*44b781cfSAndrew Turner 	}
1859*44b781cfSAndrew Turner 
1860*44b781cfSAndrew Turner 	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1861*44b781cfSAndrew Turner 
1862*44b781cfSAndrew Turner 	return processed;
1863*44b781cfSAndrew Turner }
1864*44b781cfSAndrew Turner 
1865*44b781cfSAndrew Turner static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1866*44b781cfSAndrew Turner {
1867*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = channel->pdata;
1868*44b781cfSAndrew Turner 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1869*44b781cfSAndrew Turner 	struct xgbe_ring *ring = channel->rx_ring;
1870*44b781cfSAndrew Turner 	struct xgbe_ring_data *rdata;
1871*44b781cfSAndrew Turner 	struct xgbe_packet_data *packet;
1872*44b781cfSAndrew Turner 	struct net_device *netdev = pdata->netdev;
1873*44b781cfSAndrew Turner 	struct napi_struct *napi;
1874*44b781cfSAndrew Turner 	struct sk_buff *skb;
1875*44b781cfSAndrew Turner 	struct skb_shared_hwtstamps *hwtstamps;
1876*44b781cfSAndrew Turner 	unsigned int incomplete, error, context_next, context;
1877*44b781cfSAndrew Turner 	unsigned int len, rdesc_len, max_len;
1878*44b781cfSAndrew Turner 	unsigned int received = 0;
1879*44b781cfSAndrew Turner 	int packet_count = 0;
1880*44b781cfSAndrew Turner 
1881*44b781cfSAndrew Turner 	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1882*44b781cfSAndrew Turner 
1883*44b781cfSAndrew Turner 	/* Nothing to do if there isn't a Rx ring for this channel */
1884*44b781cfSAndrew Turner 	if (!ring)
1885*44b781cfSAndrew Turner 		return 0;
1886*44b781cfSAndrew Turner 
1887*44b781cfSAndrew Turner 	incomplete = 0;
1888*44b781cfSAndrew Turner 	context_next = 0;
1889*44b781cfSAndrew Turner 
1890*44b781cfSAndrew Turner 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
1891*44b781cfSAndrew Turner 
1892*44b781cfSAndrew Turner 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1893*44b781cfSAndrew Turner 	packet = &ring->packet_data;
1894*44b781cfSAndrew Turner 	while (packet_count < budget) {
1895*44b781cfSAndrew Turner 		DBGPR("  cur = %d\n", ring->cur);
1896*44b781cfSAndrew Turner 
1897*44b781cfSAndrew Turner 		/* First time through the loop, see if we need to restore state */
1898*44b781cfSAndrew Turner 		if (!received && rdata->state_saved) {
1899*44b781cfSAndrew Turner 			skb = rdata->state.skb;
1900*44b781cfSAndrew Turner 			error = rdata->state.error;
1901*44b781cfSAndrew Turner 			len = rdata->state.len;
1902*44b781cfSAndrew Turner 		} else {
1903*44b781cfSAndrew Turner 			memset(packet, 0, sizeof(*packet));
1904*44b781cfSAndrew Turner 			skb = NULL;
1905*44b781cfSAndrew Turner 			error = 0;
1906*44b781cfSAndrew Turner 			len = 0;
1907*44b781cfSAndrew Turner 		}
1908*44b781cfSAndrew Turner 
1909*44b781cfSAndrew Turner read_again:
1910*44b781cfSAndrew Turner 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1911*44b781cfSAndrew Turner 
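1911*44b781cfSAndrew Turner 		/* Refresh Rx buffers once more than one eighth of the
1911*44b781cfSAndrew Turner 		 * ring (XGBE_RX_DESC_CNT descriptors) is dirty
1911*44b781cfSAndrew Turner 		 */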
1912*44b781cfSAndrew Turner 		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
1913*44b781cfSAndrew Turner 			xgbe_rx_refresh(channel);
1914*44b781cfSAndrew Turner 
1915*44b781cfSAndrew Turner 		if (hw_if->dev_read(channel))
1916*44b781cfSAndrew Turner 			break;
1917*44b781cfSAndrew Turner 
1918*44b781cfSAndrew Turner 		received++;
1919*44b781cfSAndrew Turner 		ring->cur++;
1920*44b781cfSAndrew Turner 
1921*44b781cfSAndrew Turner 		incomplete = XGMAC_GET_BITS(packet->attributes,
1922*44b781cfSAndrew Turner 					    RX_PACKET_ATTRIBUTES,
1923*44b781cfSAndrew Turner 					    INCOMPLETE);
1924*44b781cfSAndrew Turner 		context_next = XGMAC_GET_BITS(packet->attributes,
1925*44b781cfSAndrew Turner 					      RX_PACKET_ATTRIBUTES,
1926*44b781cfSAndrew Turner 					      CONTEXT_NEXT);
1927*44b781cfSAndrew Turner 		context = XGMAC_GET_BITS(packet->attributes,
1928*44b781cfSAndrew Turner 					 RX_PACKET_ATTRIBUTES,
1929*44b781cfSAndrew Turner 					 CONTEXT);
1930*44b781cfSAndrew Turner 
1931*44b781cfSAndrew Turner 		/* Earlier error, just drain the remaining data */
1932*44b781cfSAndrew Turner 		if ((incomplete || context_next) && error)
1933*44b781cfSAndrew Turner 			goto read_again;
1934*44b781cfSAndrew Turner 
1935*44b781cfSAndrew Turner 		if (error || packet->errors) {
1936*44b781cfSAndrew Turner 			if (packet->errors)
1937*44b781cfSAndrew Turner 				netif_err(pdata, rx_err, netdev,
1938*44b781cfSAndrew Turner 					  "error in received packet\n");
1939*44b781cfSAndrew Turner 			dev_kfree_skb(skb);
1940*44b781cfSAndrew Turner 			goto next_packet;
1941*44b781cfSAndrew Turner 		}
1942*44b781cfSAndrew Turner 
1943*44b781cfSAndrew Turner 		if (!context) {
1944*44b781cfSAndrew Turner 			/* Length is cumulative, get this descriptor's length */
1945*44b781cfSAndrew Turner 			rdesc_len = rdata->rx.len - len;
1946*44b781cfSAndrew Turner 			len += rdesc_len;
1947*44b781cfSAndrew Turner 
1948*44b781cfSAndrew Turner 			if (rdesc_len && !skb) {
1949*44b781cfSAndrew Turner 				skb = xgbe_create_skb(pdata, napi, rdata,
1950*44b781cfSAndrew Turner 						      rdesc_len);
1951*44b781cfSAndrew Turner 				if (!skb)
1952*44b781cfSAndrew Turner 					error = 1;
1953*44b781cfSAndrew Turner 			} else if (rdesc_len) {
1954*44b781cfSAndrew Turner 				dma_sync_single_range_for_cpu(pdata->dev,
1955*44b781cfSAndrew Turner 							rdata->rx.buf.dma_base,
1956*44b781cfSAndrew Turner 							rdata->rx.buf.dma_off,
1957*44b781cfSAndrew Turner 							rdata->rx.buf.dma_len,
1958*44b781cfSAndrew Turner 							DMA_FROM_DEVICE);
1959*44b781cfSAndrew Turner 
1960*44b781cfSAndrew Turner 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1961*44b781cfSAndrew Turner 						rdata->rx.buf.pa.pages,
1962*44b781cfSAndrew Turner 						rdata->rx.buf.pa.pages_offset,
1963*44b781cfSAndrew Turner 						rdesc_len,
1964*44b781cfSAndrew Turner 						rdata->rx.buf.dma_len);
1965*44b781cfSAndrew Turner 				rdata->rx.buf.pa.pages = NULL;
1966*44b781cfSAndrew Turner 			}
1967*44b781cfSAndrew Turner 		}
1968*44b781cfSAndrew Turner 
1969*44b781cfSAndrew Turner 		if (incomplete || context_next)
1970*44b781cfSAndrew Turner 			goto read_again;
1971*44b781cfSAndrew Turner 
1972*44b781cfSAndrew Turner 		if (!skb)
1973*44b781cfSAndrew Turner 			goto next_packet;
1974*44b781cfSAndrew Turner 
1975*44b781cfSAndrew Turner 		/* Be sure we don't exceed the configured MTU */
1976*44b781cfSAndrew Turner 		max_len = netdev->mtu + ETH_HLEN;
1977*44b781cfSAndrew Turner 		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1978*44b781cfSAndrew Turner 		    (skb->protocol == htons(ETH_P_8021Q)))
1979*44b781cfSAndrew Turner 			max_len += VLAN_HLEN;
1980*44b781cfSAndrew Turner 
1981*44b781cfSAndrew Turner 		if (skb->len > max_len) {
1982*44b781cfSAndrew Turner 			netif_err(pdata, rx_err, netdev,
1983*44b781cfSAndrew Turner 				  "packet length exceeds configured MTU\n");
1984*44b781cfSAndrew Turner 			dev_kfree_skb(skb);
1985*44b781cfSAndrew Turner 			goto next_packet;
1986*44b781cfSAndrew Turner 		}
1987*44b781cfSAndrew Turner 
1988*44b781cfSAndrew Turner 		if (netif_msg_pktdata(pdata))
1989*44b781cfSAndrew Turner 			xgbe_print_pkt(netdev, skb, false);
1990*44b781cfSAndrew Turner 
1991*44b781cfSAndrew Turner 		skb_checksum_none_assert(skb);
1992*44b781cfSAndrew Turner 		if (XGMAC_GET_BITS(packet->attributes,
1993*44b781cfSAndrew Turner 				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
1994*44b781cfSAndrew Turner 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1995*44b781cfSAndrew Turner 
1996*44b781cfSAndrew Turner 		if (XGMAC_GET_BITS(packet->attributes,
1997*44b781cfSAndrew Turner 				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1998*44b781cfSAndrew Turner 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1999*44b781cfSAndrew Turner 					       packet->vlan_ctag);
2000*44b781cfSAndrew Turner 
2001*44b781cfSAndrew Turner 		if (XGMAC_GET_BITS(packet->attributes,
2002*44b781cfSAndrew Turner 				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
2003*44b781cfSAndrew Turner 			u64 nsec;
2004*44b781cfSAndrew Turner 
2005*44b781cfSAndrew Turner 			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
2006*44b781cfSAndrew Turner 						    packet->rx_tstamp);
2007*44b781cfSAndrew Turner 			hwtstamps = skb_hwtstamps(skb);
2008*44b781cfSAndrew Turner 			hwtstamps->hwtstamp = ns_to_ktime(nsec);
2009*44b781cfSAndrew Turner 		}
2010*44b781cfSAndrew Turner 
2011*44b781cfSAndrew Turner 		if (XGMAC_GET_BITS(packet->attributes,
2012*44b781cfSAndrew Turner 				   RX_PACKET_ATTRIBUTES, RSS_HASH))
2013*44b781cfSAndrew Turner 			skb_set_hash(skb, packet->rss_hash,
2014*44b781cfSAndrew Turner 				     packet->rss_hash_type);
2015*44b781cfSAndrew Turner 
2016*44b781cfSAndrew Turner 		skb->dev = netdev;
2017*44b781cfSAndrew Turner 		skb->protocol = eth_type_trans(skb, netdev);
2018*44b781cfSAndrew Turner 		skb_record_rx_queue(skb, channel->queue_index);
2019*44b781cfSAndrew Turner 
2020*44b781cfSAndrew Turner 		napi_gro_receive(napi, skb);
2021*44b781cfSAndrew Turner 
2022*44b781cfSAndrew Turner next_packet:
2023*44b781cfSAndrew Turner 		packet_count++;
2024*44b781cfSAndrew Turner 	}
2025*44b781cfSAndrew Turner 
2026*44b781cfSAndrew Turner 	/* Check if we need to save state before leaving */
2027*44b781cfSAndrew Turner 	if (received && (incomplete || context_next)) {
2028*44b781cfSAndrew Turner 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2029*44b781cfSAndrew Turner 		rdata->state_saved = 1;
2030*44b781cfSAndrew Turner 		rdata->state.skb = skb;
2031*44b781cfSAndrew Turner 		rdata->state.len = len;
2032*44b781cfSAndrew Turner 		rdata->state.error = error;
2033*44b781cfSAndrew Turner 	}
2034*44b781cfSAndrew Turner 
2035*44b781cfSAndrew Turner 	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
2036*44b781cfSAndrew Turner 
2037*44b781cfSAndrew Turner 	return packet_count;
2038*44b781cfSAndrew Turner }
2039*44b781cfSAndrew Turner 
2040*44b781cfSAndrew Turner static int xgbe_one_poll(struct napi_struct *napi, int budget)
2041*44b781cfSAndrew Turner {
2042*44b781cfSAndrew Turner 	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2043*44b781cfSAndrew Turner 						    napi);
2044*44b781cfSAndrew Turner 	int processed = 0;
2045*44b781cfSAndrew Turner 
2046*44b781cfSAndrew Turner 	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2047*44b781cfSAndrew Turner 
2048*44b781cfSAndrew Turner 	/* Cleanup Tx ring first */
2049*44b781cfSAndrew Turner 	xgbe_tx_poll(channel);
2050*44b781cfSAndrew Turner 
2051*44b781cfSAndrew Turner 	/* Process Rx ring next */
2052*44b781cfSAndrew Turner 	processed = xgbe_rx_poll(channel, budget);
2053*44b781cfSAndrew Turner 
2054*44b781cfSAndrew Turner 	/* If we processed everything, we are done */
2055*44b781cfSAndrew Turner 	if (processed < budget) {
2056*44b781cfSAndrew Turner 		/* Turn off polling */
2057*44b781cfSAndrew Turner 		napi_complete_done(napi, processed);
2058*44b781cfSAndrew Turner 
2059*44b781cfSAndrew Turner 		/* Enable Tx and Rx interrupts */
2060*44b781cfSAndrew Turner 		enable_irq(channel->dma_irq);
2061*44b781cfSAndrew Turner 	}
2062*44b781cfSAndrew Turner 
2063*44b781cfSAndrew Turner 	DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2064*44b781cfSAndrew Turner 
2065*44b781cfSAndrew Turner 	return processed;
2066*44b781cfSAndrew Turner }
2067*44b781cfSAndrew Turner 
2068*44b781cfSAndrew Turner static int xgbe_all_poll(struct napi_struct *napi, int budget)
2069*44b781cfSAndrew Turner {
2070*44b781cfSAndrew Turner 	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2071*44b781cfSAndrew Turner 						   napi);
2072*44b781cfSAndrew Turner 	struct xgbe_channel *channel;
2073*44b781cfSAndrew Turner 	int ring_budget;
2074*44b781cfSAndrew Turner 	int processed, last_processed;
2075*44b781cfSAndrew Turner 	unsigned int i;
2076*44b781cfSAndrew Turner 
2077*44b781cfSAndrew Turner 	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
2078*44b781cfSAndrew Turner 
2079*44b781cfSAndrew Turner 	processed = 0;
2080*44b781cfSAndrew Turner 	ring_budget = budget / pdata->rx_ring_count;
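2080*44b781cfSAndrew Turner 	/* Split the budget evenly across the Rx rings; e.g. a NAPI
2080*44b781cfSAndrew Turner 	 * budget of 64 with 4 Rx rings allows each ring up to 16
2080*44b781cfSAndrew Turner 	 * packets per pass (illustrative figures)
2080*44b781cfSAndrew Turner 	 */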
2081*44b781cfSAndrew Turner 	do {
2082*44b781cfSAndrew Turner 		last_processed = processed;
2083*44b781cfSAndrew Turner 
2084*44b781cfSAndrew Turner 		channel = pdata->channel;
2085*44b781cfSAndrew Turner 		for (i = 0; i < pdata->channel_count; i++, channel++) {
2086*44b781cfSAndrew Turner 			/* Cleanup Tx ring first */
2087*44b781cfSAndrew Turner 			xgbe_tx_poll(channel);
2088*44b781cfSAndrew Turner 
2089*44b781cfSAndrew Turner 			/* Process Rx ring next */
2090*44b781cfSAndrew Turner 			if (ring_budget > (budget - processed))
2091*44b781cfSAndrew Turner 				ring_budget = budget - processed;
2092*44b781cfSAndrew Turner 			processed += xgbe_rx_poll(channel, ring_budget);
2093*44b781cfSAndrew Turner 		}
2094*44b781cfSAndrew Turner 	} while ((processed < budget) && (processed != last_processed));
2095*44b781cfSAndrew Turner 
2096*44b781cfSAndrew Turner 	/* If we processed everything, we are done */
2097*44b781cfSAndrew Turner 	if (processed < budget) {
2098*44b781cfSAndrew Turner 		/* Turn off polling */
2099*44b781cfSAndrew Turner 		napi_complete_done(napi, processed);
2100*44b781cfSAndrew Turner 
2101*44b781cfSAndrew Turner 		/* Enable Tx and Rx interrupts */
2102*44b781cfSAndrew Turner 		xgbe_enable_rx_tx_ints(pdata);
2103*44b781cfSAndrew Turner 	}
2104*44b781cfSAndrew Turner 
2105*44b781cfSAndrew Turner 	DBGPR("<--xgbe_all_poll: processed = %d\n", processed);
2106*44b781cfSAndrew Turner 
2107*44b781cfSAndrew Turner 	return processed;
2108*44b781cfSAndrew Turner }
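
/*
 * Worked example of the budget split above (hypothetical helper, for
 * illustration only, not called by this driver): with budget = 64 and
 * rx_ring_count = 4, each ring is offered 16 descriptors per sweep; the
 * clamp keeps later rings from overrunning whatever budget remains.
 */
static inline int xgbe_ring_budget_sketch(int budget, int processed,
					  unsigned int rx_ring_count)
{
	int ring_budget = budget / rx_ring_count;	/* 64 / 4 == 16 */

	/* Never offer more than the unspent portion of the NAPI budget */
	if (ring_budget > (budget - processed))
		ring_budget = budget - processed;

	return ring_budget;
}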
2109*44b781cfSAndrew Turner 
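/*
 * Dump 'count' Tx descriptors starting at 'idx'; 'flag' selects whether the
 * descriptors were just queued by software or have already been processed
 * by the device.
 */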
2110*44b781cfSAndrew Turner void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2111*44b781cfSAndrew Turner 		       unsigned int idx, unsigned int count, unsigned int flag)
2112*44b781cfSAndrew Turner {
2113*44b781cfSAndrew Turner 	struct xgbe_ring_data *rdata;
2114*44b781cfSAndrew Turner 	struct xgbe_ring_desc *rdesc;
2115*44b781cfSAndrew Turner 
2116*44b781cfSAndrew Turner 	while (count--) {
2117*44b781cfSAndrew Turner 		rdata = XGBE_GET_DESC_DATA(ring, idx);
2118*44b781cfSAndrew Turner 		rdesc = rdata->rdesc;
2119*44b781cfSAndrew Turner 		netdev_dbg(pdata->netdev,
2120*44b781cfSAndrew Turner 			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
2121*44b781cfSAndrew Turner 			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
2122*44b781cfSAndrew Turner 			   le32_to_cpu(rdesc->desc0),
2123*44b781cfSAndrew Turner 			   le32_to_cpu(rdesc->desc1),
2124*44b781cfSAndrew Turner 			   le32_to_cpu(rdesc->desc2),
2125*44b781cfSAndrew Turner 			   le32_to_cpu(rdesc->desc3));
2126*44b781cfSAndrew Turner 		idx++;
2127*44b781cfSAndrew Turner 	}
2128*44b781cfSAndrew Turner }
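
/*
 * Illustrative call site (assumed from the upstream Tx path, not shown in
 * this listing): after a frame's descriptors are written, the hardware
 * layer dumps them when Tx-queued messages are enabled, roughly:
 *
 *	if (netif_msg_tx_queued(pdata))
 *		xgbe_dump_tx_desc(pdata, ring, start_index,
 *				  packet->rdesc_count, 1);
 */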
2129*44b781cfSAndrew Turner 
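/*
 * Dump the single Rx descriptor at 'idx' after the device has written it
 * back; the Rx path typically calls this under netif_msg_rx_status().
 */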
2130*44b781cfSAndrew Turner void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2131*44b781cfSAndrew Turner 		       unsigned int idx)
2132*44b781cfSAndrew Turner {
2133*44b781cfSAndrew Turner 	struct xgbe_ring_data *rdata;
2134*44b781cfSAndrew Turner 	struct xgbe_ring_desc *rdesc;
2135*44b781cfSAndrew Turner 
2136*44b781cfSAndrew Turner 	rdata = XGBE_GET_DESC_DATA(ring, idx);
2137*44b781cfSAndrew Turner 	rdesc = rdata->rdesc;
2138*44b781cfSAndrew Turner 	netdev_dbg(pdata->netdev,
2139*44b781cfSAndrew Turner 		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
2140*44b781cfSAndrew Turner 		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
2141*44b781cfSAndrew Turner 		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
2142*44b781cfSAndrew Turner }
2143*44b781cfSAndrew Turner 
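/*
 * Log an skb's Ethernet header fields followed by a hex dump of the whole
 * frame: bytes are grouped in fours, with a wider gap every 16 bytes and a
 * new output line every 32 bytes.
 */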
2144*44b781cfSAndrew Turner void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
2145*44b781cfSAndrew Turner {
2146*44b781cfSAndrew Turner 	struct ethhdr *eth = (struct ethhdr *)skb->data;
2147*44b781cfSAndrew Turner 	unsigned char *buf = skb->data;
2148*44b781cfSAndrew Turner 	char buffer[128];
2149*44b781cfSAndrew Turner 	unsigned int i, j;
2150*44b781cfSAndrew Turner 
2151*44b781cfSAndrew Turner 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2152*44b781cfSAndrew Turner 
2153*44b781cfSAndrew Turner 	netdev_dbg(netdev, "%s packet of %d bytes\n",
2154*44b781cfSAndrew Turner 		   (tx_rx ? "TX" : "RX"), skb->len);
2155*44b781cfSAndrew Turner 
2156*44b781cfSAndrew Turner 	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
2157*44b781cfSAndrew Turner 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
2158*44b781cfSAndrew Turner 	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
2159*44b781cfSAndrew Turner 
2160*44b781cfSAndrew Turner 	for (i = 0, j = 0; i < skb->len;) {
2161*44b781cfSAndrew Turner 		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
2162*44b781cfSAndrew Turner 			      buf[i++]);
2163*44b781cfSAndrew Turner 
2164*44b781cfSAndrew Turner 		if ((i % 32) == 0) {
2165*44b781cfSAndrew Turner 			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
2166*44b781cfSAndrew Turner 			j = 0;
2167*44b781cfSAndrew Turner 		} else if ((i % 16) == 0) {
2168*44b781cfSAndrew Turner 			buffer[j++] = ' ';
2169*44b781cfSAndrew Turner 			buffer[j++] = ' ';
2170*44b781cfSAndrew Turner 		} else if ((i % 4) == 0) {
2171*44b781cfSAndrew Turner 			buffer[j++] = ' ';
2172*44b781cfSAndrew Turner 		}
2173*44b781cfSAndrew Turner 	}
	if (i % 32) {
		/* Separator spaces appended after the last byte are not
		 * NUL-terminated; terminate before printing the partial
		 * final line.
		 */
		buffer[j] = '\0';
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
	}
2176*44b781cfSAndrew Turner 
2177*44b781cfSAndrew Turner 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2178*44b781cfSAndrew Turner }
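
/*
 * Example of the dump format produced above (made-up payload bytes, a
 * 60-byte broadcast frame):
 *
 *   Dst MAC addr: ff:ff:ff:ff:ff:ff
 *   Src MAC addr: 02:00:00:12:34:56
 *   Protocol: 0x0806
 *   0x0000: ffffffff ffff0200 00123456 08060001  08000604 00010200 00123456 c0a80001
 *   0x0020: 00000000 0000c0a8 00fe0000 00000000  00000000 00000000 00000000
 */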