/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

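/* Ring bookkeeping: 'cur' and 'dirty' are free-running producer and
 * consumer indices; ring lookups reduce them modulo the descriptor
 * count, so their difference is the number of entries currently in
 * flight.
 */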
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

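/* Stop the Tx subqueue when fewer descriptors are available than the
 * caller needs, flushing any descriptors held back for xmit_more
 * first. Returns NETDEV_TX_BUSY if the queue was stopped, 0 otherwise.
 */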
static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

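/* Pre-compute everything dev_xmit() will need for this skb: the total
 * descriptor count (an optional context descriptor, the TSO header
 * descriptor, and one descriptor per XLGMAC_TX_MAX_BUF_SIZE chunk of
 * each buffer) plus the TSO/checksum/VLAN attribute flags. No
 * descriptors are modified here.
 */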
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	}

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) {
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}
		}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len;) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

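/* Derive the Rx buffer size from the MTU: add the Ethernet header,
 * FCS and VLAN tag overhead, clamp to the supported range, then round
 * up to the buffer alignment. As a worked example (assuming a 64-byte
 * XLGMAC_RX_BUF_ALIGN): a 1500-byte MTU gives 1500 + 14 + 4 + 4 =
 * 1522 bytes, rounded up to 1536.
 */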
static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

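/* Interrupt handler for the shared device interrupt. It demultiplexes
 * the per-channel DMA status bits, kicks NAPI when Tx/Rx work is
 * pending, and forwards MAC MMC counter events to the hardware ops.
 */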
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts, so for polling mode we just need to check whether
	 * this register is non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI and RI interrupt bits may still be set even when
		 * per-channel DMA interrupts are in use. Only schedule the
		 * private data napi structure when per-channel interrupts
		 * are disabled.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

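/* Per-channel Tx timer callback. When the timer armed by the Tx path
 * expires before an interrupt has reclaimed the ring, schedule NAPI
 * (disabling interrupts first) so completed descriptors are still
 * processed promptly.
 */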
static void xlgmac_tx_timer(unsigned long data)
{
	struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xlgmac_tx_timer,
			    (unsigned long)channel);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

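/* Request the shared device interrupt and, when per-channel DMA
 * interrupts are in use, one additional interrupt per channel. The
 * requests are device-managed, but the IRQs are still freed explicitly
 * in xlgmac_free_irqs() so that stop/start cycles do not leak them.
 */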
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Unwind the already-requested IRQs; 'i' is unsigned, so it wraps
	 * to UINT_MAX after reaching zero and the loop terminates
	 */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

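/* Full device restart, used by the MTU change and error recovery
 * paths: tear the device down, drop all in-flight Tx/Rx buffers, then
 * bring it back up with the current configuration.
 */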
static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						   struct xlgmac_pdata,
						   restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

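/* Main transmit path: size the descriptor requirement for the skb,
 * stop the queue early if the ring cannot hold it, then run TSO/VLAN
 * preparation, map the buffers and hand the descriptors to hardware.
 */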
static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		/* The skb has been consumed; never return a negative
		 * errno from ndo_start_xmit()
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}

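/* Re-arm every Rx descriptor consumed since the last refresh, then
 * advance the Rx tail pointer register so the DMA engine can reuse
 * them.
 */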
static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

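/* Build an skb from the split header/data Rx buffers: the header
 * buffer is copied into the skb's linear area, and any remaining
 * payload is attached as a page fragment to avoid a second copy.
 */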
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}

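/* Reclaim completed Tx descriptors for a channel, bounded by
 * XLGMAC_TX_DESC_MAX_PROC per call, and wake the queue once enough
 * descriptors are free again.
 */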
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}

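/* Receive processing for one channel. A packet can span several
 * descriptors (plus context descriptors), so partially received state
 * is saved in the ring when the budget runs out and restored on the
 * next poll.
 */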
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't an Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* On the first pass through the loop, see if we need to
		 * restore state saved by a previous poll
		 */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						struct xlgmac_channel,
						napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

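/* NAPI poll used with the single shared interrupt: the budget is split
 * evenly across the Rx rings and the channels are swept repeatedly
 * until the budget is exhausted or a sweep makes no progress.
 */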
static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(work,
						   struct xlgmac_pdata,
						   napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}