/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
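 *
 *  Schematically:
 *
 *    TBASE/RBASE --> [ BD0 | BD1 | ... | BDn-1 (WRAP) ] --wraps--> BD0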
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS
const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

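	/* Make sure bufPtr (written above) is visible to the controller
	 * before lstatus hands the descriptor back with RXBD_EMPTY set.
	 */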
	eieio();

	bdp->lstatus = lstatus;
}

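/* (Re)initialize the TX and RX descriptor rings: clear every TX BD,
 * point every RX BD at its (possibly freshly allocated) skb, and set
 * the WRAP bit on the last BD of each ring.
 */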
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

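/* Allocate a single DMA-coherent region holding all TX rings followed by
 * all RX rings, carve it up per queue, and allocate the per-ring skb
 * pointer arrays before handing off to gfar_init_bds().
 */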
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				  tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				  rx_queue->rx_ring_size, GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

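/* Program each queue's BD ring base address.  Consecutive tbaseN/rbaseN
 * registers sit 8 bytes apart, hence the += 2 stride over a u32 pointer.
 */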
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
		(priv->ndev->features & NETIF_F_RXCSUM) ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

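/* Map one register/interrupt group from the device tree.  In multi-group
 * (MQ_MG_MODE) setups the group's RX/TX queue assignment comes from the
 * fsl,rx-bit-map and fsl,tx-bit-map properties, with DEFAULT_MAPPING as
 * the fallback; single-group controllers simply own all queues.
 */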
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

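/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config and enable
 * or disable hardware timestamping.  Changing the RX setting restarts
 * the controller, since it changes whether an FCB precedes each frame.
 */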
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

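/* Mirror the low max_qs bits of bit_map: bit (max_qs - 1 - i) of the
 * input becomes bit i of the result.  For example,
 * reverse_bitmap(0x6, 3) (binary 110) returns 0x3 (binary 011).
 */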
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

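/* Write one four-entry filer "cluster" for the given protocol class,
 * filling entries downward from rqfar and returning the next free
 * index.  (The exact RQFCR/RQFPR semantics are described in the eTSEC
 * reference manual; this helper is only used by gfar_init_filer_table()
 * below.)
 */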
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

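/* Decide which errata workarounds apply by decoding the processor (PVR)
 * and system (SVR) version registers: the upper SVR half identifies the
 * part (masked so "E" security-engine variants match too), the lower
 * half the silicon revision.
 */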
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register one NAPI context per interrupt group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0,
	 * but for_each_set_bit() parses from right to left, which
	 * effectively reverses the queue numbers.
	 */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

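/* Release any skbs still left on a TX ring: unmap the first BD of each
 * frame with dma_unmap_single(), its fragment BDs with dma_unmap_page(),
 * then free the skb itself.
 */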
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}

1811 void gfar_configure_coalescing(struct gfar_private *priv,
1812 	unsigned long tx_mask, unsigned long rx_mask)
1813 {
1814 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1815 	u32 __iomem *baddr;
1816 	int i = 0;
1817 
1818 	/* Backward compatible case -- even if we enable
1819 	 * multiple queues, there's only a single reg to program
1820 	 */
1821 	gfar_write(&regs->txic, 0);
1822 	if (likely(priv->tx_queue[0]->txcoalescing))
1823 		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1824 
1825 	gfar_write(&regs->rxic, 0);
1826 	if (likely(priv->rx_queue[0]->rxcoalescing))
1827 		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1828 
1829 	if (priv->mode == MQ_MG_MODE) {
1830 		baddr = &regs->txic0;
1831 		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1832 			if (likely(priv->tx_queue[i]->txcoalescing)) {
1833 				gfar_write(baddr + i, 0);
1834 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
1835 			}
1836 		}
1837 
1838 		baddr = &regs->rxic0;
1839 		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1840 			if (likely(priv->rx_queue[i]->rxcoalescing)) {
1841 				gfar_write(baddr + i, 0);
1842 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1843 			}
1844 		}
1845 	}
1846 }
1847 
1848 static int register_grp_irqs(struct gfar_priv_grp *grp)
1849 {
1850 	struct gfar_private *priv = grp->priv;
1851 	struct net_device *dev = priv->ndev;
1852 	int err;
1853 
1854 	/* If the device has multiple interrupts, register for
1855 	 * them.  Otherwise, register only for the one combined interrupt */
1856 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1857 		/* Install our interrupt handlers for Error,
1858 		 * Transmit, and Receive */
1859 		if ((err = request_irq(grp->interruptError, gfar_error, 0,
1860 				grp->int_name_er, grp)) < 0) {
1861 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1862 				  grp->interruptError);
1863 
1864 			goto err_irq_fail;
1865 		}
1866 
1867 		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1868 				0, grp->int_name_tx, grp)) < 0) {
1869 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1870 				  grp->interruptTransmit);
1871 			goto tx_irq_fail;
1872 		}
1873 
1874 		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1875 				grp->int_name_rx, grp)) < 0) {
1876 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1877 				  grp->interruptReceive);
1878 			goto rx_irq_fail;
1879 		}
1880 	} else {
1881 		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1882 				grp->int_name_tx, grp)) < 0) {
1883 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1884 				  grp->interruptTransmit);
1885 			goto err_irq_fail;
1886 		}
1887 	}
1888 
1889 	return 0;
1890 
1891 rx_irq_fail:
1892 	free_irq(grp->interruptTransmit, grp);
1893 tx_irq_fail:
1894 	free_irq(grp->interruptError, grp);
1895 err_irq_fail:
1896 	return err;
1897 
1898 }
1899 
1900 /* Bring the controller up and running */
1901 int startup_gfar(struct net_device *ndev)
1902 {
1903 	struct gfar_private *priv = netdev_priv(ndev);
1904 	struct gfar __iomem *regs = NULL;
1905 	int err, i, j;
1906 
1907 	for (i = 0; i < priv->num_grps; i++) {
1908 		regs = priv->gfargrp[i].regs;
1909 		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1910 	}
1911 
1912 	regs = priv->gfargrp[0].regs;
1913 	err = gfar_alloc_skb_resources(ndev);
1914 	if (err)
1915 		return err;
1916 
1917 	gfar_init_mac(ndev);
1918 
1919 	for (i = 0; i < priv->num_grps; i++) {
1920 		err = register_grp_irqs(&priv->gfargrp[i]);
1921 		if (err) {
1922 			for (j = 0; j < i; j++)
1923 				free_grp_irqs(&priv->gfargrp[j]);
1924 			goto irq_fail;
1925 		}
1926 	}
1927 
1928 	/* Start the controller */
1929 	gfar_start(ndev);
1930 
1931 	phy_start(priv->phydev);
1932 
1933 	gfar_configure_coalescing(priv, 0xFF, 0xFF);
1934 
1935 	return 0;
1936 
1937 irq_fail:
1938 	free_skb_resources(priv);
1939 	return err;
1940 }
1941 
1942 /* Called when something needs to use the ethernet device */
1943 /* Returns 0 for success. */
1944 static int gfar_enet_open(struct net_device *dev)
1945 {
1946 	struct gfar_private *priv = netdev_priv(dev);
1947 	int err;
1948 
1949 	enable_napi(priv);
1950 
1951 	skb_queue_head_init(&priv->rx_recycle);
1952 
1953 	/* Initialize a bunch of registers */
1954 	init_registers(dev);
1955 
1956 	gfar_set_mac_address(dev);
1957 
1958 	err = init_phy(dev);
1959 
1960 	if (err) {
1961 		disable_napi(priv);
1962 		return err;
1963 	}
1964 
1965 	err = startup_gfar(dev);
1966 	if (err) {
1967 		disable_napi(priv);
1968 		return err;
1969 	}
1970 
1971 	netif_tx_start_all_queues(dev);
1972 
1973 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
1974 
1975 	return err;
1976 }
1977 
1978 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1979 {
1980 	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1981 
1982 	memset(fcb, 0, GMAC_FCB_LEN);
1983 
1984 	return fcb;
1985 }
1986 
1987 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1988 		int fcb_length)
1989 {
1990 	u8 flags = 0;
1991 
1992 	/* If we're here, it's an IP packet with a TCP or UDP
1993 	 * payload.  We set it to checksum, using a pseudo-header
1994 	 * we provide
1995 	 */
1996 	flags = TXFCB_DEFAULT;
1997 
1998 	/* Tell the controller what the protocol is */
1999 	/* And provide the already calculated phcs */
2000 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2001 		flags |= TXFCB_UDP;
2002 		fcb->phcs = udp_hdr(skb)->check;
2003 	} else
2004 		fcb->phcs = tcp_hdr(skb)->check;
2005 
2006 	/* l3os is the distance between the start of the
2007 	 * frame (skb->data) and the start of the IP hdr.
2008 	 * l4os is the distance between the start of the
2009 	 * l3 hdr and the l4 hdr */
2010 	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2011 	fcb->l4os = skb_network_header_len(skb);
2012 
2013 	fcb->flags = flags;
2014 }
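/*
 * Worked example (illustrative): for an untagged IPv4 frame carrying
 * only the 8-byte FCB (fcb_length == GMAC_FCB_LEN), skb->data points
 * at the FCB, so skb_network_offset() is 8 + ETH_HLEN = 22 and
 * l3os = 22 - 8 = 14; a 20-byte IP header then yields l4os = 20.
 */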
2015 
2016 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2017 {
2018 	fcb->flags |= TXFCB_VLN;
2019 	fcb->vlctl = vlan_tx_tag_get(skb);
2020 }
2021 
2022 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2023 			       struct txbd8 *base, int ring_size)
2024 {
2025 	struct txbd8 *new_bd = bdp + stride;
2026 
2027 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2028 }
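/*
 * E.g. (illustrative): in a 256-entry ring, skipping 2 BDs from
 * base + 255 lands on base + 1.  A single subtraction is enough to
 * wrap because callers only pass strides smaller than ring_size.
 */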
2029 
2030 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2031 		int ring_size)
2032 {
2033 	return skip_txbd(bdp, 1, base, ring_size);
2034 }
2035 
2036 /* This is called by the kernel when a frame is ready for transmission. */
2037 /* It is hooked up as the driver's ndo_start_xmit method */
2038 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2039 {
2040 	struct gfar_private *priv = netdev_priv(dev);
2041 	struct gfar_priv_tx_q *tx_queue = NULL;
2042 	struct netdev_queue *txq;
2043 	struct gfar __iomem *regs = NULL;
2044 	struct txfcb *fcb = NULL;
2045 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2046 	u32 lstatus;
2047 	int i, rq = 0, do_tstamp = 0;
2048 	u32 bufaddr;
2049 	unsigned long flags;
2050 	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2051 
2052 	/*
2053 	 * TOE=1 frames larger than 2500 bytes may see excess delays
2054 	 * before start of transmission.
2055 	 */
2056 	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2057 			skb->ip_summed == CHECKSUM_PARTIAL &&
2058 			skb->len > 2500)) {
2059 		int ret;
2060 
2061 		ret = skb_checksum_help(skb);
2062 		if (ret)
2063 			return ret;
2064 	}
2065 
2066 	rq = skb->queue_mapping;
2067 	tx_queue = priv->tx_queue[rq];
2068 	txq = netdev_get_tx_queue(dev, rq);
2069 	base = tx_queue->tx_bd_base;
2070 	regs = tx_queue->grp->regs;
2071 
2072 	/* check if time stamp should be generated */
2073 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2074 			priv->hwts_tx_en)) {
2075 		do_tstamp = 1;
2076 		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2077 	}
2078 
2079 	/* make space for additional header when fcb is needed */
2080 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2081 			vlan_tx_tag_present(skb) ||
2082 			unlikely(do_tstamp)) &&
2083 			(skb_headroom(skb) < fcb_length)) {
2084 		struct sk_buff *skb_new;
2085 
2086 		skb_new = skb_realloc_headroom(skb, fcb_length);
2087 		if (!skb_new) {
2088 			dev->stats.tx_errors++;
2089 			kfree_skb(skb);
2090 			return NETDEV_TX_OK;
2091 		}
2092 
2093 		/* Steal sock reference for processing TX time stamps */
2094 		swap(skb_new->sk, skb->sk);
2095 		swap(skb_new->destructor, skb->destructor);
2096 		kfree_skb(skb);
2097 		skb = skb_new;
2098 	}
2099 
2100 	/* total number of fragments in the SKB */
2101 	nr_frags = skb_shinfo(skb)->nr_frags;
2102 
2103 	/* calculate the required number of TxBDs for this skb */
2104 	if (unlikely(do_tstamp))
2105 		nr_txbds = nr_frags + 2;
2106 	else
2107 		nr_txbds = nr_frags + 1;
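	/*
	 * E.g. (illustrative): a linear skb needs a single TxBD, a
	 * 3-fragment skb needs 4 (head + frags), and each needs one
	 * more when the extra time stamp descriptor is in play.
	 */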
2108 
2109 	/* check if there is space to queue this packet */
2110 	if (nr_txbds > tx_queue->num_txbdfree) {
2111 		/* no space, stop the queue */
2112 		netif_tx_stop_queue(txq);
2113 		dev->stats.tx_fifo_errors++;
2114 		return NETDEV_TX_BUSY;
2115 	}
2116 
2117 	/* Update transmit stats */
2118 	tx_queue->stats.tx_bytes += skb->len;
2119 	tx_queue->stats.tx_packets++;
2120 
2121 	txbdp = txbdp_start = tx_queue->cur_tx;
2122 	lstatus = txbdp->lstatus;
2123 
2124 	/* Time stamp insertion requires one additional TxBD */
2125 	if (unlikely(do_tstamp))
2126 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2127 				tx_queue->tx_ring_size);
2128 
2129 	if (nr_frags == 0) {
2130 		if (unlikely(do_tstamp))
2131 			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2132 					TXBD_INTERRUPT);
2133 		else
2134 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2135 	} else {
2136 		/* Place the fragment addresses and lengths into the TxBDs */
2137 		for (i = 0; i < nr_frags; i++) {
2138 			/* Point at the next BD, wrapping as needed */
2139 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2140 
2141 			length = skb_shinfo(skb)->frags[i].size;
2142 
2143 			lstatus = txbdp->lstatus | length |
2144 				BD_LFLAG(TXBD_READY);
2145 
2146 			/* Handle the last BD specially */
2147 			if (i == nr_frags - 1)
2148 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2149 
2150 			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2151 						   &skb_shinfo(skb)->frags[i],
2152 						   0,
2153 						   length,
2154 						   DMA_TO_DEVICE);
2155 
2156 			/* set the TxBD length and buffer pointer */
2157 			txbdp->bufPtr = bufaddr;
2158 			txbdp->lstatus = lstatus;
2159 		}
2160 
2161 		lstatus = txbdp_start->lstatus;
2162 	}
2163 
2164 	/* Add TxPAL between FCB and frame if required */
2165 	if (unlikely(do_tstamp)) {
2166 		skb_push(skb, GMAC_TXPAL_LEN);
2167 		memset(skb->data, 0, GMAC_TXPAL_LEN);
2168 	}
2169 
2170 	/* Set up checksumming */
2171 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2172 		fcb = gfar_add_fcb(skb);
2173 		/* as specified by errata */
2174 		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2175 			     && ((unsigned long)fcb % 0x20) > 0x18)) {
2176 			__skb_pull(skb, GMAC_FCB_LEN);
2177 			skb_checksum_help(skb);
2178 		} else {
2179 			lstatus |= BD_LFLAG(TXBD_TOE);
2180 			gfar_tx_checksum(skb, fcb, fcb_length);
2181 		}
2182 	}
2183 
2184 	if (vlan_tx_tag_present(skb)) {
2185 		if (unlikely(!fcb)) {
2186 			fcb = gfar_add_fcb(skb);
2187 			lstatus |= BD_LFLAG(TXBD_TOE);
2188 		}
2189 
2190 		gfar_tx_vlan(skb, fcb);
2191 	}
2192 
2193 	/* Setup tx hardware time stamping if requested */
2194 	if (unlikely(do_tstamp)) {
2195 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2196 		if (!fcb)
2197 			fcb = gfar_add_fcb(skb);
2198 		fcb->ptp = 1;
2199 		lstatus |= BD_LFLAG(TXBD_TOE);
2200 	}
2201 
2202 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2203 			skb_headlen(skb), DMA_TO_DEVICE);
2204 
2205 	/*
2206 	 * If time stamping is requested, one additional TxBD must be set up. The
2207 	 * first TxBD points to the FCB and must have a data length of
2208 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2209 	 * the full frame length.
2210 	 */
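	/*
	 * Sketch of the resulting pair (lengths as programmed below):
	 *
	 *   txbdp_start:  bufPtr -> [FCB|TxPAL|data], length = GMAC_FCB_LEN
	 *   txbdp_tstamp: bufPtr -> [data],           length = skb_headlen - fcb_length
	 */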
2211 	if (unlikely(do_tstamp)) {
2212 		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2213 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2214 				(skb_headlen(skb) - fcb_length);
2215 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2216 	} else {
2217 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2218 	}
2219 
2220 	/*
2221 	 * We can work in parallel with gfar_clean_tx_ring(), except
2222 	 * when modifying num_txbdfree. Note that we didn't grab the lock
2223 	 * when we were reading the num_txbdfree and checking for available
2224 	 * space, that's because outside of this function it can only grow,
2225 	 * and once we've got needed space, it cannot suddenly disappear.
2226 	 *
2227 	 * The lock also protects us from gfar_error(), which can modify
2228 	 * regs->tstat and thus retrigger the transfers, which is why we
2229 	 * also must grab the lock before setting ready bit for the first
2230 	 * to be transmitted BD.
2231 	 */
2232 	spin_lock_irqsave(&tx_queue->txlock, flags);
2233 
2234 	/*
2235 	 * The powerpc-specific eieio() is used, as wmb() has too strong
2236 	 * semantics (it requires synchronization between cacheable and
2237 	 * uncacheable mappings, which eieio doesn't provide and which we
2238 	 * don't need), thus requiring a more expensive sync instruction.  At
2239 	 * some point, the set of architecture-independent barrier functions
2240 	 * should be expanded to include weaker barriers.
2241 	 */
2242 	eieio();
2243 
2244 	txbdp_start->lstatus = lstatus;
2245 
2246 	eieio(); /* force lstatus write before tx_skbuff */
2247 
2248 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2249 
2250 	/* Update the current skb pointer to the next entry we will use
2251 	 * (wrapping if necessary) */
2252 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2253 		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
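	/*
	 * TX_RING_MOD_MASK() relies on ring sizes being powers of two
	 * (e.g. a 256-entry ring masks with 0xff), so the increment
	 * wraps back to 0 at the end of the ring.
	 */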
2254 
2255 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2256 
2257 	/* reduce TxBD free count */
2258 	tx_queue->num_txbdfree -= (nr_txbds);
2259 
2260 	/* If the next BD still needs to be cleaned up, then the bds
2261 	   are full.  We need to tell the kernel to stop sending us stuff. */
2262 	if (!tx_queue->num_txbdfree) {
2263 		netif_tx_stop_queue(txq);
2264 
2265 		dev->stats.tx_fifo_errors++;
2266 	}
2267 
2268 	/* Tell the DMA to go go go */
2269 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2270 
2271 	/* Unlock priv */
2272 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2273 
2274 	return NETDEV_TX_OK;
2275 }
2276 
2277 /* Stops the kernel queue, and halts the controller */
2278 static int gfar_close(struct net_device *dev)
2279 {
2280 	struct gfar_private *priv = netdev_priv(dev);
2281 
2282 	disable_napi(priv);
2283 
2284 	cancel_work_sync(&priv->reset_task);
2285 	stop_gfar(dev);
2286 
2287 	/* Disconnect from the PHY */
2288 	phy_disconnect(priv->phydev);
2289 	priv->phydev = NULL;
2290 
2291 	netif_tx_stop_all_queues(dev);
2292 
2293 	return 0;
2294 }
2295 
2296 /* Changes the mac address if the controller is not running. */
2297 static int gfar_set_mac_address(struct net_device *dev)
2298 {
2299 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2300 
2301 	return 0;
2302 }
2303 
2304 /* Check if rx parser should be activated */
2305 void gfar_check_rx_parser_mode(struct gfar_private *priv)
2306 {
2307 	struct gfar __iomem *regs;
2308 	u32 tempval;
2309 
2310 	regs = priv->gfargrp[0].regs;
2311 
2312 	tempval = gfar_read(&regs->rctrl);
2313 	/* Enable the parser if any enabled feature still requires it, else disable it */
2314 	if (tempval & RCTRL_REQ_PARSER)
2315 		tempval |= RCTRL_PRSDEP_INIT;
2316 	else
2317 		tempval &= ~RCTRL_PRSDEP_INIT;
2318 	gfar_write(&regs->rctrl, tempval);
2319 }
2320 
2321 /* Enables and disables VLAN insertion/extraction */
2322 void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2323 {
2324 	struct gfar_private *priv = netdev_priv(dev);
2325 	struct gfar __iomem *regs = NULL;
2326 	unsigned long flags;
2327 	u32 tempval;
2328 
2329 	regs = priv->gfargrp[0].regs;
2330 	local_irq_save(flags);
2331 	lock_rx_qs(priv);
2332 
2333 	if (features & NETIF_F_HW_VLAN_TX) {
2334 		/* Enable VLAN tag insertion */
2335 		tempval = gfar_read(&regs->tctrl);
2336 		tempval |= TCTRL_VLINS;
2337 		gfar_write(&regs->tctrl, tempval);
2338 	} else {
2339 		/* Disable VLAN tag insertion */
2340 		tempval = gfar_read(&regs->tctrl);
2341 		tempval &= ~TCTRL_VLINS;
2342 		gfar_write(&regs->tctrl, tempval);
2343 	}
2344 
2345 	if (features & NETIF_F_HW_VLAN_RX) {
2346 		/* Enable VLAN tag extraction */
2347 		tempval = gfar_read(&regs->rctrl);
2348 		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2349 		gfar_write(&regs->rctrl, tempval);
2350 	} else {
2351 		/* Disable VLAN tag extraction */
2352 		tempval = gfar_read(&regs->rctrl);
2353 		tempval &= ~RCTRL_VLEX;
2354 		gfar_write(&regs->rctrl, tempval);
2355 
2356 		gfar_check_rx_parser_mode(priv);
2357 	}
2358 
2359 	gfar_change_mtu(dev, dev->mtu);
2360 
2361 	unlock_rx_qs(priv);
2362 	local_irq_restore(flags);
2363 }
2364 
2365 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2366 {
2367 	int tempsize, tempval;
2368 	struct gfar_private *priv = netdev_priv(dev);
2369 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2370 	int oldsize = priv->rx_buffer_size;
2371 	int frame_size = new_mtu + ETH_HLEN;
2372 
2373 	if (gfar_is_vlan_on(priv))
2374 		frame_size += VLAN_HLEN;
2375 
2376 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2377 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2378 		return -EINVAL;
2379 	}
2380 
2381 	if (gfar_uses_fcb(priv))
2382 		frame_size += GMAC_FCB_LEN;
2383 
2384 	frame_size += priv->padding;
2385 
2386 	tempsize =
2387 	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2388 	    INCREMENTAL_BUFFER_SIZE;
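	/*
	 * The above rounds frame_size up to the next multiple of
	 * INCREMENTAL_BUFFER_SIZE, always adding at least one full
	 * increment of slack: e.g. (illustrative) with a 512-byte
	 * increment, a 1536-byte frame yields a 2048-byte buffer.
	 */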
2389 
2390 	/* Only stop and restart the controller if it is currently
2391 	 * running and the buffer size actually changed */
2392 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2393 		stop_gfar(dev);
2394 
2395 	priv->rx_buffer_size = tempsize;
2396 
2397 	dev->mtu = new_mtu;
2398 
2399 	gfar_write(&regs->mrblr, priv->rx_buffer_size);
2400 	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2401 
2402 	/* If the mtu is larger than the max size for standard
2403 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
2404 	 * to allow huge frames, and to check the length */
2405 	tempval = gfar_read(&regs->maccfg2);
2406 
2407 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2408 			gfar_has_errata(priv, GFAR_ERRATA_74))
2409 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2410 	else
2411 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2412 
2413 	gfar_write(&regs->maccfg2, tempval);
2414 
2415 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2416 		startup_gfar(dev);
2417 
2418 	return 0;
2419 }
2420 
2421 /* gfar_reset_task gets scheduled when a packet has not been
2422  * transmitted after a set amount of time.
2423  * For now, assume that clearing out all the structures, and
2424  * starting over will fix the problem.
2425  */
2426 static void gfar_reset_task(struct work_struct *work)
2427 {
2428 	struct gfar_private *priv = container_of(work, struct gfar_private,
2429 			reset_task);
2430 	struct net_device *dev = priv->ndev;
2431 
2432 	if (dev->flags & IFF_UP) {
2433 		netif_tx_stop_all_queues(dev);
2434 		stop_gfar(dev);
2435 		startup_gfar(dev);
2436 		netif_tx_start_all_queues(dev);
2437 	}
2438 
2439 	netif_tx_schedule_all(dev);
2440 }
2441 
2442 static void gfar_timeout(struct net_device *dev)
2443 {
2444 	struct gfar_private *priv = netdev_priv(dev);
2445 
2446 	dev->stats.tx_errors++;
2447 	schedule_work(&priv->reset_task);
2448 }
2449 
2450 static void gfar_align_skb(struct sk_buff *skb)
2451 {
2452 	/* We need the data buffer to be aligned properly.  Reserve as many
2453 	 * bytes as needed to bring skb->data up to the next RXBUF_ALIGNMENT boundary
2454 	 */
2455 	skb_reserve(skb, RXBUF_ALIGNMENT -
2456 		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2457 }
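/*
 * Example (illustrative, assuming a 64-byte RXBUF_ALIGNMENT): a buffer
 * whose address ends in 0x24 gets 64 - 0x24 = 28 bytes reserved, while
 * an already aligned one gets the full 64.  The expression never
 * reserves zero bytes, which is why gfar_alloc_skb() below allocates
 * RXBUF_ALIGNMENT bytes of slack on top of rx_buffer_size.
 */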
2458 
2459 /* Clean up completed transmit descriptors; called from NAPI polling */
2460 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2461 {
2462 	struct net_device *dev = tx_queue->dev;
2463 	struct gfar_private *priv = netdev_priv(dev);
2464 	struct gfar_priv_rx_q *rx_queue = NULL;
2465 	struct txbd8 *bdp, *next = NULL;
2466 	struct txbd8 *lbdp = NULL;
2467 	struct txbd8 *base = tx_queue->tx_bd_base;
2468 	struct sk_buff *skb;
2469 	int skb_dirtytx;
2470 	int tx_ring_size = tx_queue->tx_ring_size;
2471 	int frags = 0, nr_txbds = 0;
2472 	int i;
2473 	int howmany = 0;
2474 	u32 lstatus;
2475 	size_t buflen;
2476 
2477 	rx_queue = priv->rx_queue[tx_queue->qindex];
2478 	bdp = tx_queue->dirty_tx;
2479 	skb_dirtytx = tx_queue->skb_dirtytx;
2480 
2481 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2482 		unsigned long flags;
2483 
2484 		frags = skb_shinfo(skb)->nr_frags;
2485 
2486 		/*
2487 		 * When time stamping, one additional TxBD must be freed.
2488 		 * Also, we need to dma_unmap_single() the TxPAL.
2489 		 */
2490 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2491 			nr_txbds = frags + 2;
2492 		else
2493 			nr_txbds = frags + 1;
2494 
2495 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2496 
2497 		lstatus = lbdp->lstatus;
2498 
2499 		/* Only clean completed frames */
2500 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2501 				(lstatus & BD_LENGTH_MASK))
2502 			break;
2503 
2504 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2505 			next = next_txbd(bdp, base, tx_ring_size);
2506 			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2507 		} else
2508 			buflen = bdp->length;
2509 
2510 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2511 				buflen, DMA_TO_DEVICE);
2512 
2513 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2514 			struct skb_shared_hwtstamps shhwtstamps;
2515 			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2516 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2517 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2518 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2519 			skb_tstamp_tx(skb, &shhwtstamps);
2520 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2521 			bdp = next;
2522 		}
2523 
2524 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2525 		bdp = next_txbd(bdp, base, tx_ring_size);
2526 
2527 		for (i = 0; i < frags; i++) {
2528 			dma_unmap_page(&priv->ofdev->dev,
2529 					bdp->bufPtr,
2530 					bdp->length,
2531 					DMA_TO_DEVICE);
2532 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2533 			bdp = next_txbd(bdp, base, tx_ring_size);
2534 		}
2535 
2536 		/*
2537 		 * If there's room in the queue (limited to rx_ring_size)
2538 		 * we add this skb back into the pool, if it's the right size
2539 		 */
2540 		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2541 				skb_recycle_check(skb, priv->rx_buffer_size +
2542 					RXBUF_ALIGNMENT)) {
2543 			gfar_align_skb(skb);
2544 			skb_queue_head(&priv->rx_recycle, skb);
2545 		} else
2546 			dev_kfree_skb_any(skb);
2547 
2548 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2549 
2550 		skb_dirtytx = (skb_dirtytx + 1) &
2551 			TX_RING_MOD_MASK(tx_ring_size);
2552 
2553 		howmany++;
2554 		spin_lock_irqsave(&tx_queue->txlock, flags);
2555 		tx_queue->num_txbdfree += nr_txbds;
2556 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2557 	}
2558 
2559 	/* If we freed a buffer, we can restart transmission, if necessary */
2560 	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2561 		netif_wake_subqueue(dev, tx_queue->qindex);
2562 
2563 	/* Update dirty indicators */
2564 	tx_queue->skb_dirtytx = skb_dirtytx;
2565 	tx_queue->dirty_tx = bdp;
2566 
2567 	return howmany;
2568 }
2569 
2570 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2571 {
2572 	unsigned long flags;
2573 
2574 	spin_lock_irqsave(&gfargrp->grplock, flags);
2575 	if (napi_schedule_prep(&gfargrp->napi)) {
2576 		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2577 		__napi_schedule(&gfargrp->napi);
2578 	} else {
2579 		/*
2580 		 * Clear IEVENT, so interrupts aren't called again
2581 		 * because of the packets that have already arrived.
2582 		 */
2583 		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2584 	}
2585 	spin_unlock_irqrestore(&gfargrp->grplock, flags);
2586 
2587 }
2588 
2589 /* Interrupt Handler for Transmit complete */
2590 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2591 {
2592 	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2593 	return IRQ_HANDLED;
2594 }
2595 
2596 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2597 		struct sk_buff *skb)
2598 {
2599 	struct net_device *dev = rx_queue->dev;
2600 	struct gfar_private *priv = netdev_priv(dev);
2601 	dma_addr_t buf;
2602 
2603 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
2604 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2605 	gfar_init_rxbdp(rx_queue, bdp, buf);
2606 }
2607 
2608 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2609 {
2610 	struct gfar_private *priv = netdev_priv(dev);
2611 	struct sk_buff *skb = NULL;
2612 
2613 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2614 	if (!skb)
2615 		return NULL;
2616 
2617 	gfar_align_skb(skb);
2618 
2619 	return skb;
2620 }
2621 
2622 struct sk_buff *gfar_new_skb(struct net_device *dev)
2623 {
2624 	struct gfar_private *priv = netdev_priv(dev);
2625 	struct sk_buff *skb = NULL;
2626 
2627 	skb = skb_dequeue(&priv->rx_recycle);
2628 	if (!skb)
2629 		skb = gfar_alloc_skb(dev);
2630 
2631 	return skb;
2632 }
2633 
2634 static inline void count_errors(unsigned short status, struct net_device *dev)
2635 {
2636 	struct gfar_private *priv = netdev_priv(dev);
2637 	struct net_device_stats *stats = &dev->stats;
2638 	struct gfar_extra_stats *estats = &priv->extra_stats;
2639 
2640 	/* If the packet was truncated, none of the other errors
2641 	 * matter */
2642 	if (status & RXBD_TRUNCATED) {
2643 		stats->rx_length_errors++;
2644 
2645 		estats->rx_trunc++;
2646 
2647 		return;
2648 	}
2649 	/* Count the errors, if there were any */
2650 	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2651 		stats->rx_length_errors++;
2652 
2653 		if (status & RXBD_LARGE)
2654 			estats->rx_large++;
2655 		else
2656 			estats->rx_short++;
2657 	}
2658 	if (status & RXBD_NONOCTET) {
2659 		stats->rx_frame_errors++;
2660 		estats->rx_nonoctet++;
2661 	}
2662 	if (status & RXBD_CRCERR) {
2663 		estats->rx_crcerr++;
2664 		stats->rx_crc_errors++;
2665 	}
2666 	if (status & RXBD_OVERRUN) {
2667 		estats->rx_overrun++;
2668 		stats->rx_crc_errors++;
2669 	}
2670 }
2671 
2672 irqreturn_t gfar_receive(int irq, void *grp_id)
2673 {
2674 	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2675 	return IRQ_HANDLED;
2676 }
2677 
2678 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2679 {
2680 	/* If valid headers were found, and valid sums
2681 	 * were verified, then we tell the kernel that no
2682 	 * checksumming is necessary.  Otherwise, it is */
2683 	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2684 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2685 	else
2686 		skb_checksum_none_assert(skb);
2687 }
2688 
2689 
2690 /* gfar_process_frame() -- handle one incoming packet, assuming the
2691  * caller has already verified that skb isn't NULL.  */
2692 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2693 			      int amount_pull)
2694 {
2695 	struct gfar_private *priv = netdev_priv(dev);
2696 	struct rxfcb *fcb = NULL;
2697 
2698 	int ret;
2699 
2700 	/* the FCB, if present, sits at the beginning of the buffer */
2701 	fcb = (struct rxfcb *)skb->data;
2702 
2703 	/* Remove the FCB from the skb */
2704 	/* Remove the padded bytes, if there are any */
2705 	if (amount_pull) {
2706 		skb_record_rx_queue(skb, fcb->rq);
2707 		skb_pull(skb, amount_pull);
2708 	}
2709 
2710 	/* Get receive timestamp from the skb */
2711 	if (priv->hwts_rx_en) {
2712 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2713 		u64 *ns = (u64 *) skb->data;
2714 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2715 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2716 	}
2717 
2718 	if (priv->padding)
2719 		skb_pull(skb, priv->padding);
2720 
2721 	if (dev->features & NETIF_F_RXCSUM)
2722 		gfar_rx_checksum(skb, fcb);
2723 
2724 	/* Tell the skb what kind of packet this is */
2725 	skb->protocol = eth_type_trans(skb, dev);
2726 
2727 	/*
2728 	 * We do need to check for NETIF_F_HW_VLAN_RX here:
2729 	 * even if vlan rx accel is disabled, on some chips
2730 	 * RXFCB_VLN is set pseudo-randomly.
2731 	 */
2732 	if (dev->features & NETIF_F_HW_VLAN_RX &&
2733 	    fcb->flags & RXFCB_VLN)
2734 		__vlan_hwaccel_put_tag(skb, fcb->vlctl);
2735 
2736 	/* Send the packet up the stack */
2737 	ret = netif_receive_skb(skb);
2738 
2739 	if (ret == NET_RX_DROP)
2740 		priv->extra_stats.kernel_dropped++;
2741 
2742 	return 0;
2743 }
2744 
2745 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2746  *   until the budget/quota has been reached. Returns the number
2747  *   of frames handled
2748  */
2749 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2750 {
2751 	struct net_device *dev = rx_queue->dev;
2752 	struct rxbd8 *bdp, *base;
2753 	struct sk_buff *skb;
2754 	int pkt_len;
2755 	int amount_pull;
2756 	int howmany = 0;
2757 	struct gfar_private *priv = netdev_priv(dev);
2758 
2759 	/* Get the first full descriptor */
2760 	bdp = rx_queue->cur_rx;
2761 	base = rx_queue->rx_bd_base;
2762 
2763 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2764 
2765 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2766 		struct sk_buff *newskb;
2767 		rmb();
2768 
2769 		/* Add another skb for the future */
2770 		newskb = gfar_new_skb(dev);
2771 
2772 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2773 
2774 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2775 				priv->rx_buffer_size, DMA_FROM_DEVICE);
2776 
2777 		if (unlikely(!(bdp->status & RXBD_ERR) &&
2778 				bdp->length > priv->rx_buffer_size))
2779 			bdp->status = RXBD_LARGE;
2780 
2781 		/* We drop the frame if we failed to allocate a new buffer */
2782 		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2783 				 bdp->status & RXBD_ERR)) {
2784 			count_errors(bdp->status, dev);
2785 
2786 			if (unlikely(!newskb))
2787 				newskb = skb;
2788 			else if (skb)
2789 				skb_queue_head(&priv->rx_recycle, skb);
2790 		} else {
2791 			/* Increment the number of packets */
2792 			rx_queue->stats.rx_packets++;
2793 			howmany++;
2794 
2795 			if (likely(skb)) {
2796 				/* Remove the FCS from the packet length */
2797 				pkt_len = bdp->length - ETH_FCS_LEN;
2798 				skb_put(skb, pkt_len);
2799 				rx_queue->stats.rx_bytes += pkt_len;
2800 				skb_record_rx_queue(skb, rx_queue->qindex);
2801 				gfar_process_frame(dev, skb, amount_pull);
2802 
2803 			} else {
2804 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2805 				rx_queue->stats.rx_dropped++;
2806 				priv->extra_stats.rx_skbmissing++;
2807 			}
2808 
2809 		}
2810 
2811 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2812 
2813 		/* Setup the new bdp */
2814 		gfar_new_rxbdp(rx_queue, bdp, newskb);
2815 
2816 		/* Update to the next pointer */
2817 		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2818 
2819 		/* update to point at the next skb */
2820 		rx_queue->skb_currx =
2821 		    (rx_queue->skb_currx + 1) &
2822 		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2823 	}
2824 
2825 	/* Update the current rxbd pointer to be the next one */
2826 	rx_queue->cur_rx = bdp;
2827 
2828 	return howmany;
2829 }
2830 
2831 static int gfar_poll(struct napi_struct *napi, int budget)
2832 {
2833 	struct gfar_priv_grp *gfargrp = container_of(napi,
2834 			struct gfar_priv_grp, napi);
2835 	struct gfar_private *priv = gfargrp->priv;
2836 	struct gfar __iomem *regs = gfargrp->regs;
2837 	struct gfar_priv_tx_q *tx_queue = NULL;
2838 	struct gfar_priv_rx_q *rx_queue = NULL;
2839 	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2840 	int tx_cleaned = 0, i, left_over_budget = budget;
2841 	unsigned long serviced_queues = 0;
2842 	int num_queues = 0;
2843 
2844 	num_queues = gfargrp->num_rx_queues;
2845 	budget_per_queue = budget/num_queues;
2846 
2847 	/* Clear IEVENT, so interrupts aren't called again
2848 	 * because of the packets that have already arrived */
2849 	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2850 
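	/*
	 * The budget is shared evenly among this group's rx queues, and
	 * anything a queue leaves unused is pooled and handed to the
	 * still-busy queues on the next pass.  E.g. (illustrative), a
	 * budget of 64 across two queues gives 32 each; if queue 0
	 * cleans only 10 frames, queue 1 may consume the remaining 22
	 * on the following iteration.
	 */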
2851 	while (num_queues && left_over_budget) {
2852 
2853 		budget_per_queue = left_over_budget/num_queues;
2854 		left_over_budget = 0;
2855 
2856 		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2857 			if (test_bit(i, &serviced_queues))
2858 				continue;
2859 			rx_queue = priv->rx_queue[i];
2860 			tx_queue = priv->tx_queue[rx_queue->qindex];
2861 
2862 			tx_cleaned += gfar_clean_tx_ring(tx_queue);
2863 			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2864 							budget_per_queue);
2865 			rx_cleaned += rx_cleaned_per_queue;
2866 			if (rx_cleaned_per_queue < budget_per_queue) {
2867 				left_over_budget = left_over_budget +
2868 					(budget_per_queue - rx_cleaned_per_queue);
2869 				set_bit(i, &serviced_queues);
2870 				num_queues--;
2871 			}
2872 		}
2873 	}
2874 
2875 	if (tx_cleaned)
2876 		return budget;
2877 
2878 	if (rx_cleaned < budget) {
2879 		napi_complete(napi);
2880 
2881 		/* Clear the halt bit in RSTAT */
2882 		gfar_write(&regs->rstat, gfargrp->rstat);
2883 
2884 		gfar_write(&regs->imask, IMASK_DEFAULT);
2885 
2886 		/* If we are coalescing interrupts, update the timer */
2887 		/* Otherwise, clear it */
2888 		gfar_configure_coalescing(priv,
2889 				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2890 	}
2891 
2892 	return rx_cleaned;
2893 }
2894 
2895 #ifdef CONFIG_NET_POLL_CONTROLLER
2896 /*
2897  * Polling 'interrupt' - used by things like netconsole to send skbs
2898  * without having to re-enable interrupts. It's not called while
2899  * the interrupt routine is executing.
2900  */
2901 static void gfar_netpoll(struct net_device *dev)
2902 {
2903 	struct gfar_private *priv = netdev_priv(dev);
2904 	int i = 0;
2905 
2906 	/* If the device has multiple interrupts, run tx/rx */
2907 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2908 		for (i = 0; i < priv->num_grps; i++) {
2909 			disable_irq(priv->gfargrp[i].interruptTransmit);
2910 			disable_irq(priv->gfargrp[i].interruptReceive);
2911 			disable_irq(priv->gfargrp[i].interruptError);
2912 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2913 						&priv->gfargrp[i]);
2914 			enable_irq(priv->gfargrp[i].interruptError);
2915 			enable_irq(priv->gfargrp[i].interruptReceive);
2916 			enable_irq(priv->gfargrp[i].interruptTransmit);
2917 		}
2918 	} else {
2919 		for (i = 0; i < priv->num_grps; i++) {
2920 			disable_irq(priv->gfargrp[i].interruptTransmit);
2921 			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2922 						&priv->gfargrp[i]);
2923 			enable_irq(priv->gfargrp[i].interruptTransmit);
2924 		}
2925 	}
2926 }
2927 #endif
2928 
2929 /* The interrupt handler for devices with one interrupt */
2930 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2931 {
2932 	struct gfar_priv_grp *gfargrp = grp_id;
2933 
2934 	/* Save ievent for future reference */
2935 	u32 events = gfar_read(&gfargrp->regs->ievent);
2936 
2937 	/* Check for reception */
2938 	if (events & IEVENT_RX_MASK)
2939 		gfar_receive(irq, grp_id);
2940 
2941 	/* Check for transmit completion */
2942 	if (events & IEVENT_TX_MASK)
2943 		gfar_transmit(irq, grp_id);
2944 
2945 	/* Check for errors */
2946 	if (events & IEVENT_ERR_MASK)
2947 		gfar_error(irq, grp_id);
2948 
2949 	return IRQ_HANDLED;
2950 }
2951 
2952 /* Called every time the controller might need to be made
2953  * aware of new link state.  The PHY code conveys this
2954  * information through variables in the phydev structure, and this
2955  * function converts those variables into the appropriate
2956  * register values, and can bring down the device if needed.
2957  */
2958 static void adjust_link(struct net_device *dev)
2959 {
2960 	struct gfar_private *priv = netdev_priv(dev);
2961 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
2962 	unsigned long flags;
2963 	struct phy_device *phydev = priv->phydev;
2964 	int new_state = 0;
2965 
2966 	local_irq_save(flags);
2967 	lock_tx_qs(priv);
2968 
2969 	if (phydev->link) {
2970 		u32 tempval = gfar_read(&regs->maccfg2);
2971 		u32 ecntrl = gfar_read(&regs->ecntrl);
2972 
2973 		/* Now we make sure that we can be in full duplex mode.
2974 		 * If not, we operate in half-duplex mode. */
2975 		if (phydev->duplex != priv->oldduplex) {
2976 			new_state = 1;
2977 			if (!(phydev->duplex))
2978 				tempval &= ~(MACCFG2_FULL_DUPLEX);
2979 			else
2980 				tempval |= MACCFG2_FULL_DUPLEX;
2981 
2982 			priv->oldduplex = phydev->duplex;
2983 		}
2984 
2985 		if (phydev->speed != priv->oldspeed) {
2986 			new_state = 1;
2987 			switch (phydev->speed) {
2988 			case 1000:
2989 				tempval =
2990 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2991 
2992 				ecntrl &= ~(ECNTRL_R100);
2993 				break;
2994 			case 100:
2995 			case 10:
2996 				tempval =
2997 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2998 
2999 				/* Reduced mode distinguishes
3000 				 * between 10 and 100 */
3001 				if (phydev->speed == SPEED_100)
3002 					ecntrl |= ECNTRL_R100;
3003 				else
3004 					ecntrl &= ~(ECNTRL_R100);
3005 				break;
3006 			default:
3007 				netif_warn(priv, link, dev,
3008 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3009 					   phydev->speed);
3010 				break;
3011 			}
3012 
3013 			priv->oldspeed = phydev->speed;
3014 		}
3015 
3016 		gfar_write(&regs->maccfg2, tempval);
3017 		gfar_write(&regs->ecntrl, ecntrl);
3018 
3019 		if (!priv->oldlink) {
3020 			new_state = 1;
3021 			priv->oldlink = 1;
3022 		}
3023 	} else if (priv->oldlink) {
3024 		new_state = 1;
3025 		priv->oldlink = 0;
3026 		priv->oldspeed = 0;
3027 		priv->oldduplex = -1;
3028 	}
3029 
3030 	if (new_state && netif_msg_link(priv))
3031 		phy_print_status(phydev);
3032 	unlock_tx_qs(priv);
3033 	local_irq_restore(flags);
3034 }
3035 
3036 /* Update the hash table based on the current list of multicast
3037  * addresses we subscribe to.  Also, change the promiscuity of
3038  * the device based on the flags (this function is called
3039  * whenever dev->flags is changed) */
3040 static void gfar_set_multi(struct net_device *dev)
3041 {
3042 	struct netdev_hw_addr *ha;
3043 	struct gfar_private *priv = netdev_priv(dev);
3044 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3045 	u32 tempval;
3046 
3047 	if (dev->flags & IFF_PROMISC) {
3048 		/* Set RCTRL to PROM */
3049 		tempval = gfar_read(&regs->rctrl);
3050 		tempval |= RCTRL_PROM;
3051 		gfar_write(&regs->rctrl, tempval);
3052 	} else {
3053 		/* Set RCTRL to not PROM */
3054 		tempval = gfar_read(&regs->rctrl);
3055 		tempval &= ~(RCTRL_PROM);
3056 		gfar_write(&regs->rctrl, tempval);
3057 	}
3058 
3059 	if (dev->flags & IFF_ALLMULTI) {
3060 		/* Set the hash to rx all multicast frames */
3061 		gfar_write(&regs->igaddr0, 0xffffffff);
3062 		gfar_write(&regs->igaddr1, 0xffffffff);
3063 		gfar_write(&regs->igaddr2, 0xffffffff);
3064 		gfar_write(&regs->igaddr3, 0xffffffff);
3065 		gfar_write(&regs->igaddr4, 0xffffffff);
3066 		gfar_write(&regs->igaddr5, 0xffffffff);
3067 		gfar_write(&regs->igaddr6, 0xffffffff);
3068 		gfar_write(&regs->igaddr7, 0xffffffff);
3069 		gfar_write(&regs->gaddr0, 0xffffffff);
3070 		gfar_write(&regs->gaddr1, 0xffffffff);
3071 		gfar_write(&regs->gaddr2, 0xffffffff);
3072 		gfar_write(&regs->gaddr3, 0xffffffff);
3073 		gfar_write(&regs->gaddr4, 0xffffffff);
3074 		gfar_write(&regs->gaddr5, 0xffffffff);
3075 		gfar_write(&regs->gaddr6, 0xffffffff);
3076 		gfar_write(&regs->gaddr7, 0xffffffff);
3077 	} else {
3078 		int em_num;
3079 		int idx;
3080 
3081 		/* zero out the hash */
3082 		gfar_write(&regs->igaddr0, 0x0);
3083 		gfar_write(&regs->igaddr1, 0x0);
3084 		gfar_write(&regs->igaddr2, 0x0);
3085 		gfar_write(&regs->igaddr3, 0x0);
3086 		gfar_write(&regs->igaddr4, 0x0);
3087 		gfar_write(&regs->igaddr5, 0x0);
3088 		gfar_write(&regs->igaddr6, 0x0);
3089 		gfar_write(&regs->igaddr7, 0x0);
3090 		gfar_write(&regs->gaddr0, 0x0);
3091 		gfar_write(&regs->gaddr1, 0x0);
3092 		gfar_write(&regs->gaddr2, 0x0);
3093 		gfar_write(&regs->gaddr3, 0x0);
3094 		gfar_write(&regs->gaddr4, 0x0);
3095 		gfar_write(&regs->gaddr5, 0x0);
3096 		gfar_write(&regs->gaddr6, 0x0);
3097 		gfar_write(&regs->gaddr7, 0x0);
3098 
3099 		/* If we have extended hash tables, we need to
3100 		 * clear the exact match registers to prepare for
3101 		 * setting them */
3102 		if (priv->extended_hash) {
3103 			em_num = GFAR_EM_NUM + 1;
3104 			gfar_clear_exact_match(dev);
3105 			idx = 1;
3106 		} else {
3107 			idx = 0;
3108 			em_num = 0;
3109 		}
3110 
3111 		if (netdev_mc_empty(dev))
3112 			return;
3113 
3114 		/* Parse the list, and set the appropriate bits */
3115 		netdev_for_each_mc_addr(ha, dev) {
3116 			if (idx < em_num) {
3117 				gfar_set_mac_for_addr(dev, idx, ha->addr);
3118 				idx++;
3119 			} else
3120 				gfar_set_hash_for_addr(dev, ha->addr);
3121 		}
3122 	}
3123 }
3124 
3125 
3126 /* Clears each of the exact match registers to zero, so they
3127  * don't interfere with normal reception */
3128 static void gfar_clear_exact_match(struct net_device *dev)
3129 {
3130 	int idx;
3131 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3132 
3133 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3134 		gfar_set_mac_for_addr(dev, idx, zero_arr);
3135 }
3136 
3137 /* Set the appropriate hash bit for the given addr */
3138 /* The algorithm works like so:
3139  * 1) Take the Destination Address (ie the multicast address), and
3140  * do a CRC on it (little endian), and reverse the bits of the
3141  * result.
3142  * 2) Use the 8 most significant bits as a hash into a 256-entry
3143  * table.  The table is controlled through 8 32-bit registers:
3144  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3145  * entry 255.  This means that the 3 most significant bits of the
3146  * hash index select which gaddr register to use, and the 5 other bits
3147  * indicate which bit (assuming an IBM numbering scheme, which
3148  * for PowerPC (tm) is usually the case) in the register holds
3149  * the entry. */
3150 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3151 {
3152 	u32 tempval;
3153 	struct gfar_private *priv = netdev_priv(dev);
3154 	u32 result = ether_crc(ETH_ALEN, addr);
3155 	int width = priv->hash_width;
3156 	u8 whichbit = (result >> (32 - width)) & 0x1f;
3157 	u8 whichreg = result >> (32 - width + 5);
3158 	u32 value = (1 << (31-whichbit));
3159 
3160 	tempval = gfar_read(priv->hash_regs[whichreg]);
3161 	tempval |= value;
3162 	gfar_write(priv->hash_regs[whichreg], tempval);
3163 }
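/*
 * Worked example (illustrative, 8-bit hash width): a CRC result of
 * 0xB4000000 has top bits 10110100, so whichreg = 101b = 5 and
 * whichbit = 10100b = 20, setting bit 31 - 20 = 11 of gaddr5.
 */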
3164 
3165 
3166 /* There are multiple MAC Address register pairs on some controllers
3167  * This function sets the num'th pair to the given address
3168  */
3169 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3170 				  const u8 *addr)
3171 {
3172 	struct gfar_private *priv = netdev_priv(dev);
3173 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3174 	int idx;
3175 	char tmpbuf[ETH_ALEN + 2] = { 0 };	/* padded so the second 32-bit read below stays in bounds */
3176 	u32 tempval;
3177 	u32 __iomem *macptr = &regs->macstnaddr1;
3178 
3179 	macptr += num*2;
3180 
3181 	/* Now copy it into the mac registers backwards, since the */
3182 	/* controller wants the address bytes in reverse order */
3183 	for (idx = 0; idx < ETH_ALEN; idx++)
3184 		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3185 
3186 	gfar_write(macptr, *((u32 *) (tmpbuf)));
3187 
3188 	tempval = *((u32 *) (tmpbuf + 4));
3189 
3190 	gfar_write(macptr+1, tempval);
3191 }
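/*
 * Example (illustrative): for address 00:04:9f:01:02:03 the reversed
 * buffer holds 03 02 01 9f 04 00, so macstnaddr1 receives 0x0302019f
 * (as loaded on big-endian PowerPC) and the remaining two bytes land
 * in the upper half of macstnaddr2.
 */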
3192 
3193 /* GFAR error interrupt handler */
3194 static irqreturn_t gfar_error(int irq, void *grp_id)
3195 {
3196 	struct gfar_priv_grp *gfargrp = grp_id;
3197 	struct gfar __iomem *regs = gfargrp->regs;
3198 	struct gfar_private *priv = gfargrp->priv;
3199 	struct net_device *dev = priv->ndev;
3200 
3201 	/* Save ievent for future reference */
3202 	u32 events = gfar_read(&regs->ievent);
3203 
3204 	/* Clear IEVENT */
3205 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3206 
3207 	/* Magic Packet is not an error. */
3208 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3209 	    (events & IEVENT_MAG))
3210 		events &= ~IEVENT_MAG;
3211 
3212 	/* Log the event and mask state if error messages are enabled */
3213 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3214 		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3215 			   events, gfar_read(&regs->imask));
3216 
3217 	/* Update the error counters */
3218 	if (events & IEVENT_TXE) {
3219 		dev->stats.tx_errors++;
3220 
3221 		if (events & IEVENT_LC)
3222 			dev->stats.tx_window_errors++;
3223 		if (events & IEVENT_CRL)
3224 			dev->stats.tx_aborted_errors++;
3225 		if (events & IEVENT_XFUN) {
3226 			unsigned long flags;
3227 
3228 			netif_dbg(priv, tx_err, dev,
3229 				  "TX FIFO underrun, packet dropped\n");
3230 			dev->stats.tx_dropped++;
3231 			priv->extra_stats.tx_underrun++;
3232 
3233 			local_irq_save(flags);
3234 			lock_tx_qs(priv);
3235 
3236 			/* Reactivate the Tx Queues */
3237 			gfar_write(&regs->tstat, gfargrp->tstat);
3238 
3239 			unlock_tx_qs(priv);
3240 			local_irq_restore(flags);
3241 		}
3242 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3243 	}
3244 	if (events & IEVENT_BSY) {
3245 		dev->stats.rx_errors++;
3246 		priv->extra_stats.rx_bsy++;
3247 
3248 		gfar_receive(irq, grp_id);
3249 
3250 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3251 			  gfar_read(&regs->rstat));
3252 	}
3253 	if (events & IEVENT_BABR) {
3254 		dev->stats.rx_errors++;
3255 		priv->extra_stats.rx_babr++;
3256 
3257 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3258 	}
3259 	if (events & IEVENT_EBERR) {
3260 		priv->extra_stats.eberr++;
3261 		netif_dbg(priv, rx_err, dev, "bus error\n");
3262 	}
3263 	if (events & IEVENT_RXC)
3264 		netif_dbg(priv, rx_status, dev, "control frame\n");
3265 
3266 	if (events & IEVENT_BABT) {
3267 		priv->extra_stats.tx_babt++;
3268 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3269 	}
3270 	return IRQ_HANDLED;
3271 }
3272 
3273 static const struct of_device_id gfar_match[] =
3274 {
3275 	{
3276 		.type = "network",
3277 		.compatible = "gianfar",
3278 	},
3279 	{
3280 		.compatible = "fsl,etsec2",
3281 	},
3282 	{},
3283 };
3284 MODULE_DEVICE_TABLE(of, gfar_match);
3285 
3286 /* Structure for a device driver */
3287 static struct platform_driver gfar_driver = {
3288 	.driver = {
3289 		.name = "fsl-gianfar",
3290 		.owner = THIS_MODULE,
3291 		.pm = GFAR_PM_OPS,
3292 		.of_match_table = gfar_match,
3293 	},
3294 	.probe = gfar_probe,
3295 	.remove = gfar_remove,
3296 };
3297 
3298 module_platform_driver(gfar_driver);
3299