xref: /linux/drivers/net/ethernet/freescale/fec_main.c (revision 6084a6e23c971ef703229ee1aec68d01688578d6)
1 /*
2  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
4  *
5  * Right now, I am very wasteful with the buffers.  I allocate memory
6  * pages and then divide them into 2K frame buffers.  This way I know I
7  * have buffers large enough to hold one frame within one buffer descriptor.
8  * Once I get this working, I will use 64 or 128 byte CPM buffers, which
9  * will be much more memory efficient and will easily handle lots of
10  * small packets.
11  *
12  * Much better multiple PHY support by Magnus Damm.
13  * Copyright (c) 2000 Ericsson Radio Systems AB.
14  *
15  * Support for FEC controller of ColdFire processors.
16  * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
17  *
18  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19  * Copyright (c) 2004-2006 Macq Electronique SA.
20  *
21  * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/string.h>
27 #include <linux/ptrace.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/delay.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/in.h>
37 #include <linux/ip.h>
38 #include <net/ip.h>
39 #include <net/tso.h>
40 #include <linux/tcp.h>
41 #include <linux/udp.h>
42 #include <linux/icmp.h>
43 #include <linux/spinlock.h>
44 #include <linux/workqueue.h>
45 #include <linux/bitops.h>
46 #include <linux/io.h>
47 #include <linux/irq.h>
48 #include <linux/clk.h>
49 #include <linux/platform_device.h>
50 #include <linux/phy.h>
51 #include <linux/fec.h>
52 #include <linux/of.h>
53 #include <linux/of_device.h>
54 #include <linux/of_gpio.h>
55 #include <linux/of_net.h>
56 #include <linux/regulator/consumer.h>
57 #include <linux/if_vlan.h>
58 #include <linux/pinctrl/consumer.h>
59 
60 #include <asm/cacheflush.h>
61 
62 #include "fec.h"
63 
64 static void set_multicast_list(struct net_device *ndev);
65 
66 #if defined(CONFIG_ARM)
67 #define FEC_ALIGNMENT	0xf
68 #else
69 #define FEC_ALIGNMENT	0x3
70 #endif
71 
72 #define DRIVER_NAME	"fec"
73 
74 /* Pause frame field and FIFO threshold */
75 #define FEC_ENET_FCE	(1 << 5)
76 #define FEC_ENET_RSEM_V	0x84
77 #define FEC_ENET_RSFL_V	16
78 #define FEC_ENET_RAEM_V	0x8
79 #define FEC_ENET_RAFL_V	0x8
80 #define FEC_ENET_OPD_V	0xFFF0
81 
82 /* Controller is ENET-MAC */
83 #define FEC_QUIRK_ENET_MAC		(1 << 0)
84 /* Controller needs driver to swap frame */
85 #define FEC_QUIRK_SWAP_FRAME		(1 << 1)
86 /* Controller uses gasket */
87 #define FEC_QUIRK_USE_GASKET		(1 << 2)
88 /* Controller has GBIT support */
89 #define FEC_QUIRK_HAS_GBIT		(1 << 3)
90 /* Controller has extended buffer descriptors */
91 #define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
92 /* Controller has hardware checksum support */
93 #define FEC_QUIRK_HAS_CSUM		(1 << 5)
94 /* Controller has hardware vlan support */
95 #define FEC_QUIRK_HAS_VLAN		(1 << 6)
96 /* ENET IP errata ERR006358
97  *
98  * If the ready bit in the transmit buffer descriptor (TxBD[R]) was
99  * detected as not set during a prior frame transmission, then the
100  * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
101  * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
102  * frames not being transmitted until there is a 0-to-1 transition on
103  * ENET_TDAR[TDAR].
104  */
105 #define FEC_QUIRK_ERR006358            (1 << 7)
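/* The driver's workaround (see fec_enet_submit_work()) checks at submit
 * time whether the descriptor preceding the one just queued has already
 * been consumed (its ready bit is clear) and, if so, schedules a delayed
 * re-kick of ENET_TDAR.
 */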
106 
107 static struct platform_device_id fec_devtype[] = {
108 	{
109 		/* keep it for coldfire */
110 		.name = DRIVER_NAME,
111 		.driver_data = 0,
112 	}, {
113 		.name = "imx25-fec",
114 		.driver_data = FEC_QUIRK_USE_GASKET,
115 	}, {
116 		.name = "imx27-fec",
117 		.driver_data = 0,
118 	}, {
119 		.name = "imx28-fec",
120 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
121 	}, {
122 		.name = "imx6q-fec",
123 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
124 				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
125 				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
126 	}, {
127 		.name = "mvf600-fec",
128 		.driver_data = FEC_QUIRK_ENET_MAC,
129 	}, {
130 		/* sentinel */
131 	}
132 };
133 MODULE_DEVICE_TABLE(platform, fec_devtype);
134 
135 enum imx_fec_type {
136 	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
137 	IMX27_FEC,	/* runs on i.mx27/35/51 */
138 	IMX28_FEC,
139 	IMX6Q_FEC,
140 	MVF600_FEC,
141 };
142 
143 static const struct of_device_id fec_dt_ids[] = {
144 	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
145 	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
146 	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
147 	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
148 	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
149 	{ /* sentinel */ }
150 };
151 MODULE_DEVICE_TABLE(of, fec_dt_ids);
152 
153 static unsigned char macaddr[ETH_ALEN];
154 module_param_array(macaddr, byte, NULL, 0);
155 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
156 
157 #if defined(CONFIG_M5272)
158 /*
159  * Some hardware gets its MAC address out of local flash memory.
160  * If this is non-zero then assume it is the address to get the MAC from.
161  */
162 #if defined(CONFIG_NETtel)
163 #define	FEC_FLASHMAC	0xf0006006
164 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
165 #define	FEC_FLASHMAC	0xf0006000
166 #elif defined(CONFIG_CANCam)
167 #define	FEC_FLASHMAC	0xf0020000
168 #elif defined (CONFIG_M5272C3)
169 #define	FEC_FLASHMAC	(0xffe04000 + 4)
170 #elif defined(CONFIG_MOD5272)
171 #define FEC_FLASHMAC	0xffc0406b
172 #else
173 #define	FEC_FLASHMAC	0
174 #endif
175 #endif /* CONFIG_M5272 */
176 
177 /* Interrupt events/masks. */
178 #define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
179 #define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
180 #define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
181 #define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
182 #define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
183 #define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
184 #define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
185 #define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
186 #define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
187 #define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
188 
189 #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
190 #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
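/* While a NAPI poll is pending, fec_enet_interrupt() masks further RX
 * interrupts with FEC_RX_DISABLED_IMASK; fec_enet_rx_napi() restores
 * FEC_DEFAULT_IMASK once the poll finishes under budget.
 */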
191 
192 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
193  */
194 #define PKT_MAXBUF_SIZE		1522
195 #define PKT_MINBUF_SIZE		64
196 #define PKT_MAXBLR_SIZE		1536
197 
198 /* FEC receive acceleration */
199 #define FEC_RACC_IPDIS		(1 << 1)
200 #define FEC_RACC_PRODIS		(1 << 2)
201 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
202 
203 /*
204  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
205  * size bits. Other FEC hardware does not, so we need to take that into
206  * account when setting it.
207  */
208 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
209     defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
210 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
211 #else
212 #define	OPT_FRAME_SIZE	0
213 #endif
214 
215 /* FEC MII MMFR bits definition */
216 #define FEC_MMFR_ST		(1 << 30)
217 #define FEC_MMFR_OP_READ	(2 << 28)
218 #define FEC_MMFR_OP_WRITE	(1 << 28)
219 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
220 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
221 #define FEC_MMFR_TA		(2 << 16)
222 #define FEC_MMFR_DATA(v)	(v & 0xffff)
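/* For example, fec_enet_mdio_read() below starts a clause-22 read by
 * writing
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(mii_id) |
 *	FEC_MMFR_RA(regnum) | FEC_MMFR_TA
 *
 * to FEC_MII_DATA, and fetches the result with FEC_MMFR_DATA().
 */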
223 
224 #define FEC_MII_TIMEOUT		30000 /* us */
225 
226 /* Transmitter timeout */
227 #define TX_TIMEOUT (2 * HZ)
228 
229 #define FEC_PAUSE_FLAG_AUTONEG	0x1
230 #define FEC_PAUSE_FLAG_ENABLE	0x2
231 
232 #define TSO_HEADER_SIZE		128
233 /* Max number of allowed TCP segments for software TSO */
234 #define FEC_MAX_TSO_SEGS	100
235 #define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
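/* Each TSO segment needs one descriptor for the rebuilt header plus at
 * least one for payload data, hence the factor of two above (see
 * fec_enet_txq_submit_tso()).
 */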
236 
237 #define IS_TSO_HEADER(txq, addr) \
238 	((addr >= txq->tso_hdrs_dma) && \
239 	(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
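/* TSO headers are built in the pre-mapped tso_hdrs region (tso_hdrs_dma),
 * so fec_enet_tx() uses this test to skip dma_unmap_single() for buffer
 * addresses that fall inside it.
 */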
240 
241 static int mii_cnt;
242 
243 static inline
244 struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
245 {
246 	struct bufdesc *new_bd = bdp + 1;
247 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
248 	struct bufdesc_ex *ex_base;
249 	struct bufdesc *base;
250 	int ring_size;
251 
252 	if (bdp >= fep->tx_bd_base) {
253 		base = fep->tx_bd_base;
254 		ring_size = fep->tx_ring_size;
255 		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
256 	} else {
257 		base = fep->rx_bd_base;
258 		ring_size = fep->rx_ring_size;
259 		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
260 	}
261 
262 	if (fep->bufdesc_ex)
263 		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
264 			ex_base : ex_new_bd);
265 	else
266 		return (new_bd >= (base + ring_size)) ?
267 			base : new_bd;
268 }
269 
270 static inline
271 struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
272 {
273 	struct bufdesc *new_bd = bdp - 1;
274 	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
275 	struct bufdesc_ex *ex_base;
276 	struct bufdesc *base;
277 	int ring_size;
278 
279 	if (bdp >= fep->tx_bd_base) {
280 		base = fep->tx_bd_base;
281 		ring_size = fep->tx_ring_size;
282 		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
283 	} else {
284 		base = fep->rx_bd_base;
285 		ring_size = fep->rx_ring_size;
286 		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
287 	}
288 
289 	if (fep->bufdesc_ex)
290 		return (struct bufdesc *)((ex_new_bd < ex_base) ?
291 			(ex_new_bd + ring_size) : ex_new_bd);
292 	else
293 		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
294 }
295 
296 static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
297 				struct fec_enet_private *fep)
298 {
299 	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
300 }
301 
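/* Number of free TX descriptors between cur_tx and dirty_tx.  For
 * example, right after fec_enet_bd_init() dirty_tx sits on the last ring
 * entry and cur_tx on the first, so this reports tx_ring_size - 2 free
 * entries; the "- 1" keeps one descriptor in reserve between the two
 * pointers.
 */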
302 static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
303 {
304 	int entries;
305 
306 	entries = ((const char *)fep->dirty_tx -
307 			(const char *)fep->cur_tx) / fep->bufdesc_size - 1;
308 
309 	return entries > 0 ? entries : entries + fep->tx_ring_size;
310 }
311 
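/* Controllers with FEC_QUIRK_SWAP_FRAME (e.g. the imx28 entry above)
 * handle frame data big-endian, so buffers are byte-swapped with
 * swap_buffer(): via the tx_bounce copies on transmit, and in place on
 * receive.
 */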
312 static void *swap_buffer(void *bufaddr, int len)
313 {
314 	int i;
315 	unsigned int *buf = bufaddr;
316 
317 	for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
318 		*buf = cpu_to_be32(*buf);
319 
320 	return bufaddr;
321 }
322 
323 static int
324 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
325 {
326 	/* Only run for packets requiring a checksum. */
327 	if (skb->ip_summed != CHECKSUM_PARTIAL)
328 		return 0;
329 
330 	if (unlikely(skb_cow_head(skb, 0)))
331 		return -1;
332 
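	/* The controller inserts the IP header and protocol checksums
	 * itself (BD_ENET_TX_IINS/PINS are set for CHECKSUM_PARTIAL
	 * frames), so the values seeded by the stack are cleared here
	 * before the buffer is handed to hardware.
	 */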
333 	ip_hdr(skb)->check = 0;
334 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
335 
336 	return 0;
337 }
338 
339 static void
340 fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
341 {
342 	const struct platform_device_id *id_entry =
343 				platform_get_device_id(fep->pdev);
344 	struct bufdesc *bdp_pre;
345 
346 	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
347 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
348 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
349 		fep->delay_work.trig_tx = true;
350 		schedule_delayed_work(&(fep->delay_work.delay_work),
351 					msecs_to_jiffies(1));
352 	}
353 }
354 
355 static int
356 fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
357 {
358 	struct fec_enet_private *fep = netdev_priv(ndev);
359 	const struct platform_device_id *id_entry =
360 				platform_get_device_id(fep->pdev);
361 	struct bufdesc *bdp = fep->cur_tx;
362 	struct bufdesc_ex *ebdp;
363 	int nr_frags = skb_shinfo(skb)->nr_frags;
364 	int frag, frag_len;
365 	unsigned short status;
366 	unsigned int estatus = 0;
367 	skb_frag_t *this_frag;
368 	unsigned int index;
369 	void *bufaddr;
370 	int i;
371 
372 	for (frag = 0; frag < nr_frags; frag++) {
373 		this_frag = &skb_shinfo(skb)->frags[frag];
374 		bdp = fec_enet_get_nextdesc(bdp, fep);
375 		ebdp = (struct bufdesc_ex *)bdp;
376 
377 		status = bdp->cbd_sc;
378 		status &= ~BD_ENET_TX_STATS;
379 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
380 		frag_len = skb_shinfo(skb)->frags[frag].size;
381 
382 		/* Handle the last BD specially */
383 		if (frag == nr_frags - 1) {
384 			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
385 			if (fep->bufdesc_ex) {
386 				estatus |= BD_ENET_TX_INT;
387 				if (unlikely(skb_shinfo(skb)->tx_flags &
388 					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
389 					estatus |= BD_ENET_TX_TS;
390 			}
391 		}
392 
393 		if (fep->bufdesc_ex) {
394 			if (skb->ip_summed == CHECKSUM_PARTIAL)
395 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
396 			ebdp->cbd_bdu = 0;
397 			ebdp->cbd_esc = estatus;
398 		}
399 
400 		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
401 
402 		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
403 		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
404 			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
405 			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
406 			bufaddr = fep->tx_bounce[index];
407 
408 			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
409 				swap_buffer(bufaddr, frag_len);
410 		}
411 
412 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
413 						frag_len, DMA_TO_DEVICE);
414 		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
415 			dev_kfree_skb_any(skb);
416 			if (net_ratelimit())
417 				netdev_err(ndev, "Tx DMA memory map failed\n");
418 			goto dma_mapping_error;
419 		}
420 
421 		bdp->cbd_datlen = frag_len;
422 		bdp->cbd_sc = status;
423 	}
424 
425 	fep->cur_tx = bdp;
426 
427 	return 0;
428 
429 dma_mapping_error:
430 	bdp = fep->cur_tx;
431 	for (i = 0; i < frag; i++) {
432 		bdp = fec_enet_get_nextdesc(bdp, fep);
433 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
434 				bdp->cbd_datlen, DMA_TO_DEVICE);
435 	}
436 	return -ENOMEM;	/* the skb has already been freed */
437 }
438 
439 static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
440 {
441 	struct fec_enet_private *fep = netdev_priv(ndev);
442 	const struct platform_device_id *id_entry =
443 				platform_get_device_id(fep->pdev);
444 	int nr_frags = skb_shinfo(skb)->nr_frags;
445 	struct bufdesc *bdp, *last_bdp;
446 	void *bufaddr;
447 	unsigned short status;
448 	unsigned short buflen;
449 	unsigned int estatus = 0;
450 	unsigned int index;
451 	int entries_free;
452 	int ret;
453 
454 	entries_free = fec_enet_get_free_txdesc_num(fep);
455 	if (entries_free < MAX_SKB_FRAGS + 1) {
456 		dev_kfree_skb_any(skb);
457 		if (net_ratelimit())
458 			netdev_err(ndev, "NOT enough BD for SG!\n");
459 		return NETDEV_TX_OK;
460 	}
461 
462 	/* Protocol checksum off-load for TCP and UDP. */
463 	if (fec_enet_clear_csum(skb, ndev)) {
464 		dev_kfree_skb_any(skb);
465 		return NETDEV_TX_OK;
466 	}
467 
468 	/* Fill in a Tx ring entry */
469 	bdp = fep->cur_tx;
470 	status = bdp->cbd_sc;
471 	status &= ~BD_ENET_TX_STATS;
472 
473 	/* Set buffer length and buffer pointer */
474 	bufaddr = skb->data;
475 	buflen = skb_headlen(skb);
476 
477 	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
478 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
479 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
480 		memcpy(fep->tx_bounce[index], skb->data, buflen);
481 		bufaddr = fep->tx_bounce[index];
482 
483 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
484 			swap_buffer(bufaddr, buflen);
485 	}
486 
487 	/* Push the data cache so the CPM does not get stale memory
488 	 * data.
489 	 */
490 	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
491 					buflen, DMA_TO_DEVICE);
492 	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
493 		dev_kfree_skb_any(skb);
494 		if (net_ratelimit())
495 			netdev_err(ndev, "Tx DMA memory map failed\n");
496 		return NETDEV_TX_OK;
497 	}
498 
499 	if (nr_frags) {
500 		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
501 		if (ret) {
			/* frag path freed the skb; drop the mapping of the
			 * linear part and report the buffer as consumed
			 */
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					buflen, DMA_TO_DEVICE);
			return NETDEV_TX_OK;
		}
503 	} else {
504 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
505 		if (fep->bufdesc_ex) {
506 			estatus = BD_ENET_TX_INT;
507 			if (unlikely(skb_shinfo(skb)->tx_flags &
508 				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
509 				estatus |= BD_ENET_TX_TS;
510 		}
511 	}
512 
513 	if (fep->bufdesc_ex) {
514 
515 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
516 
517 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
518 			fep->hwts_tx_en))
519 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
520 
521 		if (skb->ip_summed == CHECKSUM_PARTIAL)
522 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
523 
524 		ebdp->cbd_bdu = 0;
525 		ebdp->cbd_esc = estatus;
526 	}
527 
528 	last_bdp = fep->cur_tx;
529 	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
530 	/* Save skb pointer */
531 	fep->tx_skbuff[index] = skb;
532 
533 	bdp->cbd_datlen = buflen;
534 
535 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
536 	 * it's the last BD of the frame, and to put the CRC on the end.
537 	 */
538 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
539 	bdp->cbd_sc = status;
540 
541 	fec_enet_submit_work(bdp, fep);
542 
543 	/* If this was the last BD in the ring, start at the beginning again. */
544 	bdp = fec_enet_get_nextdesc(last_bdp, fep);
545 
546 	skb_tx_timestamp(skb);
547 
548 	fep->cur_tx = bdp;
549 
550 	/* Trigger transmission start */
551 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
552 
553 	return 0;
554 }
555 
556 static int
557 fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
558 			struct bufdesc *bdp, int index, char *data,
559 			int size, bool last_tcp, bool is_last)
560 {
561 	struct fec_enet_private *fep = netdev_priv(ndev);
562 	const struct platform_device_id *id_entry =
563 				platform_get_device_id(fep->pdev);
564 	struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
565 	unsigned short status;
566 	unsigned int estatus = 0;
567 
568 	status = bdp->cbd_sc;
569 	status &= ~BD_ENET_TX_STATS;
570 
571 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
572 	bdp->cbd_datlen = size;
573 
574 	if (((unsigned long) data) & FEC_ALIGNMENT ||
575 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
576 		memcpy(fep->tx_bounce[index], data, size);
577 		data = fep->tx_bounce[index];
578 
579 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
580 			swap_buffer(data, size);
581 	}
582 
583 	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
584 					size, DMA_TO_DEVICE);
585 	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
586 		dev_kfree_skb_any(skb);
587 		if (net_ratelimit())
588 			netdev_err(ndev, "Tx DMA memory map failed\n");
589 		return NETDEV_TX_BUSY;
590 	}
591 
592 	if (fep->bufdesc_ex) {
593 		if (skb->ip_summed == CHECKSUM_PARTIAL)
594 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
595 		ebdp->cbd_bdu = 0;
596 		ebdp->cbd_esc = estatus;
597 	}
598 
599 	/* Handle the last BD specially */
600 	if (last_tcp)
601 		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
602 	if (is_last) {
603 		status |= BD_ENET_TX_INTR;
604 		if (fep->bufdesc_ex)
605 			ebdp->cbd_esc |= BD_ENET_TX_INT;
606 	}
607 
608 	bdp->cbd_sc = status;
609 
610 	return 0;
611 }
612 
613 static int
614 fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
615 			struct bufdesc *bdp, int index)
616 {
617 	struct fec_enet_private *fep = netdev_priv(ndev);
618 	const struct platform_device_id *id_entry =
619 				platform_get_device_id(fep->pdev);
620 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
621 	struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
622 	void *bufaddr;
623 	unsigned long dmabuf;
624 	unsigned short status;
625 	unsigned int estatus = 0;
626 
627 	status = bdp->cbd_sc;
628 	status &= ~BD_ENET_TX_STATS;
629 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
630 
631 	bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
632 	dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
633 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
634 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
635 		memcpy(fep->tx_bounce[index], skb->data, hdr_len);
636 		bufaddr = fep->tx_bounce[index];
637 
638 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
639 			swap_buffer(bufaddr, hdr_len);
640 
641 		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
642 					hdr_len, DMA_TO_DEVICE);
643 		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
644 			dev_kfree_skb_any(skb);
645 			if (net_ratelimit())
646 				netdev_err(ndev, "Tx DMA memory map failed\n");
647 			return NETDEV_TX_BUSY;
648 		}
649 	}
650 
651 	bdp->cbd_bufaddr = dmabuf;
652 	bdp->cbd_datlen = hdr_len;
653 
654 	if (fep->bufdesc_ex) {
655 		if (skb->ip_summed == CHECKSUM_PARTIAL)
656 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
657 		ebdp->cbd_bdu = 0;
658 		ebdp->cbd_esc = estatus;
659 	}
660 
661 	bdp->cbd_sc = status;
662 
663 	return 0;
664 }
665 
666 static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
667 {
668 	struct fec_enet_private *fep = netdev_priv(ndev);
669 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
670 	int total_len, data_left;
671 	struct bufdesc *bdp = fep->cur_tx;
672 	struct tso_t tso;
673 	unsigned int index = 0;
674 	int ret;
675 
676 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
677 		dev_kfree_skb_any(skb);
678 		if (net_ratelimit())
679 			netdev_err(ndev, "NOT enough BD for TSO!\n");
680 		return NETDEV_TX_OK;
681 	}
682 
683 	/* Protocol checksum off-load for TCP and UDP. */
684 	if (fec_enet_clear_csum(skb, ndev)) {
685 		dev_kfree_skb_any(skb);
686 		return NETDEV_TX_OK;
687 	}
688 
689 	/* Initialize the TSO handler, and prepare the first payload */
690 	tso_start(skb, &tso);
691 
692 	total_len = skb->len - hdr_len;
693 	while (total_len > 0) {
694 		char *hdr;
695 
696 		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
697 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
698 		total_len -= data_left;
699 
700 		/* prepare packet headers: MAC + IP + TCP */
701 		hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
702 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
703 		ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
704 		if (ret)
705 			goto err_release;
706 
707 		while (data_left > 0) {
708 			int size;
709 
710 			size = min_t(int, tso.size, data_left);
711 			bdp = fec_enet_get_nextdesc(bdp, fep);
712 			index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
713 			ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
714 							size, size == data_left,
715 							total_len == 0);
716 			if (ret)
717 				goto err_release;
718 
719 			data_left -= size;
720 			tso_build_data(skb, &tso, size);
721 		}
722 
723 		bdp = fec_enet_get_nextdesc(bdp, fep);
724 	}
725 
726 	/* Save skb pointer */
727 	fep->tx_skbuff[index] = skb;
728 
729 	fec_enet_submit_work(bdp, fep);
730 
731 	skb_tx_timestamp(skb);
732 	fep->cur_tx = bdp;
733 
734 	/* Trigger transmission start */
735 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
736 
737 	return 0;
738 
739 err_release:
740 	/* TODO: Release all used data descriptors for TSO */
741 	return ret;
742 }
743 
744 static netdev_tx_t
745 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
746 {
747 	struct fec_enet_private *fep = netdev_priv(ndev);
748 	int entries_free;
749 	int ret;
750 
751 	if (skb_is_gso(skb))
752 		ret = fec_enet_txq_submit_tso(skb, ndev);
753 	else
754 		ret = fec_enet_txq_submit_skb(skb, ndev);
755 	if (ret)
756 		return ret;
757 
758 	entries_free = fec_enet_get_free_txdesc_num(fep);
759 	if (entries_free <= fep->tx_stop_threshold)
760 		netif_stop_queue(ndev);
761 
762 	return NETDEV_TX_OK;
763 }
764 
765 /* Init RX & TX buffer descriptors
766  */
767 static void fec_enet_bd_init(struct net_device *dev)
768 {
769 	struct fec_enet_private *fep = netdev_priv(dev);
770 	struct bufdesc *bdp;
771 	unsigned int i;
772 
773 	/* Initialize the receive buffer descriptors. */
774 	bdp = fep->rx_bd_base;
775 	for (i = 0; i < fep->rx_ring_size; i++) {
776 
777 		/* Initialize the BD for every fragment in the page. */
778 		if (bdp->cbd_bufaddr)
779 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
780 		else
781 			bdp->cbd_sc = 0;
782 		bdp = fec_enet_get_nextdesc(bdp, fep);
783 	}
784 
785 	/* Set the last buffer to wrap */
786 	bdp = fec_enet_get_prevdesc(bdp, fep);
787 	bdp->cbd_sc |= BD_SC_WRAP;
788 
789 	fep->cur_rx = fep->rx_bd_base;
790 
791 	/* ...and the same for transmit */
792 	bdp = fep->tx_bd_base;
793 	fep->cur_tx = bdp;
794 	for (i = 0; i < fep->tx_ring_size; i++) {
795 
796 		/* Initialize the BD for every fragment in the page. */
797 		bdp->cbd_sc = 0;
798 		if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
799 			dev_kfree_skb_any(fep->tx_skbuff[i]);
800 			fep->tx_skbuff[i] = NULL;
801 		}
802 		bdp->cbd_bufaddr = 0;
803 		bdp = fec_enet_get_nextdesc(bdp, fep);
804 	}
805 
806 	/* Set the last buffer to wrap */
807 	bdp = fec_enet_get_prevdesc(bdp, fep);
808 	bdp->cbd_sc |= BD_SC_WRAP;
809 	fep->dirty_tx = bdp;
810 }
811 
812 /* This function is called to start or restart the FEC during a link
813  * change.  This happens when the link comes up, or when the duplex
814  * mode or link speed changes.
815  */
816 static void
817 fec_restart(struct net_device *ndev, int duplex)
818 {
819 	struct fec_enet_private *fep = netdev_priv(ndev);
820 	const struct platform_device_id *id_entry =
821 				platform_get_device_id(fep->pdev);
822 	int i;
823 	u32 val;
824 	u32 temp_mac[2];
825 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
826 	u32 ecntl = 0x2; /* ETHEREN */
827 
828 	if (netif_running(ndev)) {
829 		netif_device_detach(ndev);
830 		napi_disable(&fep->napi);
831 		netif_stop_queue(ndev);
832 		netif_tx_lock_bh(ndev);
833 	}
834 
835 	/* Whack a reset.  We should wait for this. */
836 	writel(1, fep->hwp + FEC_ECNTRL);
837 	udelay(10);
838 
839 	/*
840 	 * enet-mac reset will reset the MAC address registers too,
841 	 * so we need to reconfigure them.
842 	 */
843 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
844 		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
845 		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
846 		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
847 	}
848 
849 	/* Clear any outstanding interrupt. */
850 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
851 
852 	/* Set maximum receive buffer size. */
853 	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
854 
855 	fec_enet_bd_init(ndev);
856 
857 	/* Set receive and transmit descriptor base. */
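	/* Both rings live in the single bd_dma allocation: the TX
	 * descriptors start immediately after the rx_ring_size RX
	 * descriptors, which is also what fec_enet_get_nextdesc() relies
	 * on to tell the two rings apart.
	 */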
858 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
859 	if (fep->bufdesc_ex)
860 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
861 			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
862 	else
863 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
864 			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
865 
867 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
868 		if (fep->tx_skbuff[i]) {
869 			dev_kfree_skb_any(fep->tx_skbuff[i]);
870 			fep->tx_skbuff[i] = NULL;
871 		}
872 	}
873 
874 	/* Configure duplex mode */
875 	if (duplex) {
876 		/* FD enable */
877 		writel(0x04, fep->hwp + FEC_X_CNTRL);
878 	} else {
879 		/* No Rcv on Xmit */
880 		rcntl |= 0x02;
881 		writel(0x0, fep->hwp + FEC_X_CNTRL);
882 	}
883 
884 	fep->full_duplex = duplex;
885 
886 	/* Set MII speed */
887 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
888 
889 #if !defined(CONFIG_M5272)
890 	/* set RX checksum */
891 	val = readl(fep->hwp + FEC_RACC);
892 	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
893 		val |= FEC_RACC_OPTIONS;
894 	else
895 		val &= ~FEC_RACC_OPTIONS;
896 	writel(val, fep->hwp + FEC_RACC);
897 #endif
898 
899 	/*
900 	 * The phy interface and speed need to get configured
901 	 * differently on enet-mac.
902 	 */
903 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
904 		/* Enable flow control and length check */
905 		rcntl |= 0x40000000 | 0x00000020;
906 
907 		/* RGMII, RMII or MII */
908 		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
909 			rcntl |= (1 << 6);
910 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
911 			rcntl |= (1 << 8);
912 		else
913 			rcntl &= ~(1 << 8);
914 
915 		/* 1G, 100M or 10M */
916 		if (fep->phy_dev) {
917 			if (fep->phy_dev->speed == SPEED_1000)
918 				ecntl |= (1 << 5);
919 			else if (fep->phy_dev->speed == SPEED_100)
920 				rcntl &= ~(1 << 9);
921 			else
922 				rcntl |= (1 << 9);
923 		}
924 	} else {
925 #ifdef FEC_MIIGSK_ENR
926 		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
927 			u32 cfgr;
928 			/* disable the gasket and wait */
929 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
930 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
931 				udelay(1);
932 
933 			/*
934 			 * configure the gasket:
935 			 *   RMII, 50 MHz, no loopback, no echo
936 			 *   MII, 25 MHz, no loopback, no echo
937 			 */
938 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
939 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
940 			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
941 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
942 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
943 
944 			/* re-enable the gasket */
945 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
946 		}
947 #endif
948 	}
949 
950 #if !defined(CONFIG_M5272)
951 	/* enable pause frame */
952 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
953 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
954 	     fep->phy_dev && fep->phy_dev->pause)) {
955 		rcntl |= FEC_ENET_FCE;
956 
957 		/* set FIFO threshold parameter to reduce overrun */
958 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
959 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
960 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
961 		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
962 
963 		/* OPD */
964 		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
965 	} else {
966 		rcntl &= ~FEC_ENET_FCE;
967 	}
968 #endif /* !defined(CONFIG_M5272) */
969 
970 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
971 
972 	/* Setup multicast filter. */
973 	set_multicast_list(ndev);
974 #ifndef CONFIG_M5272
975 	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
976 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
977 #endif
978 
979 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
980 		/* enable ENET endian swap */
981 		ecntl |= (1 << 8);
982 		/* enable ENET store and forward mode */
983 		writel(1 << 8, fep->hwp + FEC_X_WMRK);
984 	}
985 
986 	if (fep->bufdesc_ex)
987 		ecntl |= (1 << 4);
988 
989 #ifndef CONFIG_M5272
990 	/* Enable the MIB statistic event counters */
991 	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
992 #endif
993 
994 	/* And last, enable the transmit and receive processing */
995 	writel(ecntl, fep->hwp + FEC_ECNTRL);
996 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
997 
998 	if (fep->bufdesc_ex)
999 		fec_ptp_start_cyclecounter(ndev);
1000 
1001 	/* Enable interrupts we wish to service */
1002 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1003 
1004 	if (netif_running(ndev)) {
1005 		netif_tx_unlock_bh(ndev);
1006 		netif_wake_queue(ndev);
1007 		napi_enable(&fep->napi);
1008 		netif_device_attach(ndev);
1009 	}
1010 }
1011 
1012 static void
1013 fec_stop(struct net_device *ndev)
1014 {
1015 	struct fec_enet_private *fep = netdev_priv(ndev);
1016 	const struct platform_device_id *id_entry =
1017 				platform_get_device_id(fep->pdev);
1018 	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1019 
1020 	/* We cannot expect a graceful transmit stop without link !!! */
1021 	if (fep->link) {
1022 		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1023 		udelay(10);
1024 		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1025 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1026 	}
1027 
1028 	/* Whack a reset.  We should wait for this. */
1029 	writel(1, fep->hwp + FEC_ECNTRL);
1030 	udelay(10);
1031 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1032 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1033 
1034 	/* We have to keep ENET enabled to keep the MII interrupt working */
1035 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1036 		writel(2, fep->hwp + FEC_ECNTRL);
1037 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1038 	}
1039 }
1040 
1041 
1042 static void
1043 fec_timeout(struct net_device *ndev)
1044 {
1045 	struct fec_enet_private *fep = netdev_priv(ndev);
1046 
1047 	ndev->stats.tx_errors++;
1048 
1049 	fep->delay_work.timeout = true;
1050 	schedule_delayed_work(&(fep->delay_work.delay_work), 0);
1051 }
1052 
1053 static void fec_enet_work(struct work_struct *work)
1054 {
1055 	struct fec_enet_private *fep =
1056 		container_of(work,
1057 			     struct fec_enet_private,
1058 			     delay_work.delay_work.work);
1059 
1060 	if (fep->delay_work.timeout) {
1061 		fep->delay_work.timeout = false;
1062 		fec_restart(fep->netdev, fep->full_duplex);
1063 		netif_wake_queue(fep->netdev);
1064 	}
1065 
1066 	if (fep->delay_work.trig_tx) {
1067 		fep->delay_work.trig_tx = false;
1068 		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
1069 	}
1070 }
1071 
1072 static void
1073 fec_enet_tx(struct net_device *ndev)
1074 {
1075 	struct	fec_enet_private *fep;
1076 	struct bufdesc *bdp;
1077 	unsigned short status;
1078 	struct	sk_buff	*skb;
1079 	int	index = 0;
1080 	int	entries_free;
1081 
1082 	fep = netdev_priv(ndev);
1083 	bdp = fep->dirty_tx;
1084 
1085 	/* get next bdp of dirty_tx */
1086 	bdp = fec_enet_get_nextdesc(bdp, fep);
1087 
1088 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
1089 
1090 		/* current queue is empty */
1091 		if (bdp == fep->cur_tx)
1092 			break;
1093 
1094 		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
1095 
1096 		skb = fep->tx_skbuff[index];
1097 		if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
1098 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1099 					bdp->cbd_datlen, DMA_TO_DEVICE);
1100 		bdp->cbd_bufaddr = 0;
1101 		if (!skb) {
1102 			bdp = fec_enet_get_nextdesc(bdp, fep);
1103 			continue;
1104 		}
1105 
1106 		/* Check for errors. */
1107 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1108 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
1109 				   BD_ENET_TX_CSL)) {
1110 			ndev->stats.tx_errors++;
1111 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1112 				ndev->stats.tx_heartbeat_errors++;
1113 			if (status & BD_ENET_TX_LC)  /* Late collision */
1114 				ndev->stats.tx_window_errors++;
1115 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1116 				ndev->stats.tx_aborted_errors++;
1117 			if (status & BD_ENET_TX_UN)  /* Underrun */
1118 				ndev->stats.tx_fifo_errors++;
1119 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1120 				ndev->stats.tx_carrier_errors++;
1121 		} else {
1122 			ndev->stats.tx_packets++;
1123 			ndev->stats.tx_bytes += skb->len;
1124 		}
1125 
1126 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
1127 			fep->bufdesc_ex) {
1128 			struct skb_shared_hwtstamps shhwtstamps;
1129 			unsigned long flags;
1130 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1131 
1132 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1133 			spin_lock_irqsave(&fep->tmreg_lock, flags);
1134 			shhwtstamps.hwtstamp = ns_to_ktime(
1135 				timecounter_cyc2time(&fep->tc, ebdp->ts));
1136 			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1137 			skb_tstamp_tx(skb, &shhwtstamps);
1138 		}
1139 
1140 		if (status & BD_ENET_TX_READY)
1141 			netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
1142 
1143 		/* Deferred means some collisions occurred during transmit,
1144 		 * but we eventually sent the packet OK.
1145 		 */
1146 		if (status & BD_ENET_TX_DEF)
1147 			ndev->stats.collisions++;
1148 
1149 		/* Free the sk buffer associated with this last transmit */
1150 		dev_kfree_skb_any(skb);
1151 		fep->tx_skbuff[index] = NULL;
1152 
1153 		fep->dirty_tx = bdp;
1154 
1155 		/* Update pointer to next buffer descriptor to be transmitted */
1156 		bdp = fec_enet_get_nextdesc(bdp, fep);
1157 
1158 		/* Since we have freed up a buffer, the ring is no longer full
1159 		 */
1160 		if (netif_queue_stopped(ndev)) {
1161 			entries_free = fec_enet_get_free_txdesc_num(fep);
1162 			if (entries_free >= fep->tx_wake_threshold)
1163 				netif_wake_queue(ndev);
1164 		}
1165 	}
1166 	return;
1167 }
1168 
1169 /* During a receive, the cur_rx points to the current incoming buffer.
1170  * When we update through the ring, if the next incoming buffer has
1171  * not been given to the system, we just set the empty indicator,
1172  * effectively tossing the packet.
1173  */
1174 static int
1175 fec_enet_rx(struct net_device *ndev, int budget)
1176 {
1177 	struct fec_enet_private *fep = netdev_priv(ndev);
1178 	const struct platform_device_id *id_entry =
1179 				platform_get_device_id(fep->pdev);
1180 	struct bufdesc *bdp;
1181 	unsigned short status;
1182 	struct	sk_buff	*skb;
1183 	ushort	pkt_len;
1184 	__u8 *data;
1185 	int	pkt_received = 0;
1186 	struct	bufdesc_ex *ebdp = NULL;
1187 	bool	vlan_packet_rcvd = false;
1188 	u16	vlan_tag;
1189 	int	index = 0;
1190 
1191 #ifdef CONFIG_M532x
1192 	flush_cache_all();
1193 #endif
1194 
1195 	/* First, grab all of the stats for the incoming packet.
1196 	 * These get messed up if we get called due to a busy condition.
1197 	 */
1198 	bdp = fep->cur_rx;
1199 
1200 	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
1201 
1202 		if (pkt_received >= budget)
1203 			break;
1204 		pkt_received++;
1205 
1206 		/* Since we have allocated space to hold a complete frame,
1207 		 * the last indicator should be set.
1208 		 */
1209 		if ((status & BD_ENET_RX_LAST) == 0)
1210 			netdev_err(ndev, "rcv is not +last\n");
1211 
1212 		if (!fep->opened)
1213 			goto rx_processing_done;
1214 
1215 		/* Check for errors. */
1216 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1217 			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
1218 			ndev->stats.rx_errors++;
1219 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
1220 				/* Frame too long or too short. */
1221 				ndev->stats.rx_length_errors++;
1222 			}
1223 			if (status & BD_ENET_RX_NO)	/* Frame alignment */
1224 				ndev->stats.rx_frame_errors++;
1225 			if (status & BD_ENET_RX_CR)	/* CRC Error */
1226 				ndev->stats.rx_crc_errors++;
1227 			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
1228 				ndev->stats.rx_fifo_errors++;
1229 		}
1230 
1231 		/* Report late collisions as a frame error.
1232 		 * On this error, the BD is closed, but we don't know what we
1233 		 * have in the buffer.  So, just drop this frame on the floor.
1234 		 */
1235 		if (status & BD_ENET_RX_CL) {
1236 			ndev->stats.rx_errors++;
1237 			ndev->stats.rx_frame_errors++;
1238 			goto rx_processing_done;
1239 		}
1240 
1241 		/* Process the incoming frame. */
1242 		ndev->stats.rx_packets++;
1243 		pkt_len = bdp->cbd_datlen;
1244 		ndev->stats.rx_bytes += pkt_len;
1245 
1246 		index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
1247 		data = fep->rx_skbuff[index]->data;
1248 		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1249 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1250 
1251 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
1252 			swap_buffer(data, pkt_len);
1253 
1254 		/* Extract the enhanced buffer descriptor */
1255 		ebdp = NULL;
1256 		if (fep->bufdesc_ex)
1257 			ebdp = (struct bufdesc_ex *)bdp;
1258 
1259 		/* If this is a VLAN packet remove the VLAN Tag */
1260 		vlan_packet_rcvd = false;
1261 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1262 		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
1263 			/* Push and remove the vlan tag */
1264 			struct vlan_hdr *vlan_header =
1265 					(struct vlan_hdr *) (data + ETH_HLEN);
1266 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1267 			pkt_len -= VLAN_HLEN;
1268 
1269 			vlan_packet_rcvd = true;
1270 		}
1271 
1272 		/* This does 16 byte alignment, exactly what we need.
1273 		 * The packet length includes FCS, but we don't want to
1274 		 * include that when passing upstream as it messes up
1275 		 * bridging applications.
1276 		 */
1277 		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
1278 
1279 		if (unlikely(!skb)) {
1280 			ndev->stats.rx_dropped++;
1281 		} else {
1282 			int payload_offset = (2 * ETH_ALEN);
1283 			skb_reserve(skb, NET_IP_ALIGN);
1284 			skb_put(skb, pkt_len - 4);	/* Make room */
1285 
1286 			/* Extract the frame data without the VLAN header. */
1287 			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
1288 			if (vlan_packet_rcvd)
1289 				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
1290 			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
1291 						       data + payload_offset,
1292 						       pkt_len - 4 - (2 * ETH_ALEN));
1293 
1294 			skb->protocol = eth_type_trans(skb, ndev);
1295 
1296 			/* Get receive timestamp from the skb */
1297 			if (fep->hwts_rx_en && fep->bufdesc_ex) {
1298 				struct skb_shared_hwtstamps *shhwtstamps =
1299 							    skb_hwtstamps(skb);
1300 				unsigned long flags;
1301 
1302 				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1303 
1304 				spin_lock_irqsave(&fep->tmreg_lock, flags);
1305 				shhwtstamps->hwtstamp = ns_to_ktime(
1306 				    timecounter_cyc2time(&fep->tc, ebdp->ts));
1307 				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1308 			}
1309 
1310 			if (fep->bufdesc_ex &&
1311 			    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1312 				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
1313 					/* don't check it */
1314 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1315 				} else {
1316 					skb_checksum_none_assert(skb);
1317 				}
1318 			}
1319 
1320 			/* Handle received VLAN packets */
1321 			if (vlan_packet_rcvd)
1322 				__vlan_hwaccel_put_tag(skb,
1323 						       htons(ETH_P_8021Q),
1324 						       vlan_tag);
1325 
1326 			napi_gro_receive(&fep->napi, skb);
1327 		}
1328 
1329 		dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1330 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1331 rx_processing_done:
1332 		/* Clear the status flags for this buffer */
1333 		status &= ~BD_ENET_RX_STATS;
1334 
1335 		/* Mark the buffer empty */
1336 		status |= BD_ENET_RX_EMPTY;
1337 		bdp->cbd_sc = status;
1338 
1339 		if (fep->bufdesc_ex) {
1340 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1341 
1342 			ebdp->cbd_esc = BD_ENET_RX_INT;
1343 			ebdp->cbd_prot = 0;
1344 			ebdp->cbd_bdu = 0;
1345 		}
1346 
1347 		/* Update BD pointer to next entry */
1348 		bdp = fec_enet_get_nextdesc(bdp, fep);
1349 
1350 		/* Doing this here will keep the FEC running while we process
1351 		 * incoming frames.  On a heavily loaded network, we should be
1352 		 * able to keep up at the expense of system resources.
1353 		 */
1354 		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1355 	}
1356 	fep->cur_rx = bdp;
1357 
1358 	return pkt_received;
1359 }
1360 
1361 static irqreturn_t
1362 fec_enet_interrupt(int irq, void *dev_id)
1363 {
1364 	struct net_device *ndev = dev_id;
1365 	struct fec_enet_private *fep = netdev_priv(ndev);
1366 	uint int_events;
1367 	irqreturn_t ret = IRQ_NONE;
1368 
1369 	do {
1370 		int_events = readl(fep->hwp + FEC_IEVENT);
1371 		writel(int_events, fep->hwp + FEC_IEVENT);
1372 
1373 		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
1374 			ret = IRQ_HANDLED;
1375 
1376 			/* Disable the RX interrupt */
1377 			if (napi_schedule_prep(&fep->napi)) {
1378 				writel(FEC_RX_DISABLED_IMASK,
1379 					fep->hwp + FEC_IMASK);
1380 				__napi_schedule(&fep->napi);
1381 			}
1382 		}
1383 
1384 		if (int_events & FEC_ENET_MII) {
1385 			ret = IRQ_HANDLED;
1386 			complete(&fep->mdio_done);
1387 		}
1388 	} while (int_events);
1389 
1390 	return ret;
1391 }
1392 
1393 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1394 {
1395 	struct net_device *ndev = napi->dev;
1396 	int pkts = fec_enet_rx(ndev, budget);
1397 	struct fec_enet_private *fep = netdev_priv(ndev);
1398 
1399 	fec_enet_tx(ndev);
1400 
1401 	if (pkts < budget) {
1402 		napi_complete(napi);
1403 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1404 	}
1405 	return pkts;
1406 }
1407 
1408 /* ------------------------------------------------------------------------- */
1409 static void fec_get_mac(struct net_device *ndev)
1410 {
1411 	struct fec_enet_private *fep = netdev_priv(ndev);
1412 	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1413 	unsigned char *iap, tmpaddr[ETH_ALEN];
1414 
1415 	/*
1416 	 * try to get the MAC address in the following order:
1417 	 *
1418 	 * 1) module parameter via kernel command line in form
1419 	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1420 	 */
1421 	iap = macaddr;
1422 
1423 	/*
1424 	 * 2) from device tree data
1425 	 */
1426 	if (!is_valid_ether_addr(iap)) {
1427 		struct device_node *np = fep->pdev->dev.of_node;
1428 		if (np) {
1429 			const char *mac = of_get_mac_address(np);
1430 			if (mac)
1431 				iap = (unsigned char *) mac;
1432 		}
1433 	}
1434 
1435 	/*
1436 	 * 3) from flash or fuse (via platform data)
1437 	 */
1438 	if (!is_valid_ether_addr(iap)) {
1439 #ifdef CONFIG_M5272
1440 		if (FEC_FLASHMAC)
1441 			iap = (unsigned char *)FEC_FLASHMAC;
1442 #else
1443 		if (pdata)
1444 			iap = (unsigned char *)&pdata->mac;
1445 #endif
1446 	}
1447 
1448 	/*
1449 	 * 4) FEC mac registers set by bootloader
1450 	 */
1451 	if (!is_valid_ether_addr(iap)) {
1452 		*((__be32 *) &tmpaddr[0]) =
1453 			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1454 		*((__be16 *) &tmpaddr[4]) =
1455 			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1456 		iap = &tmpaddr[0];
1457 	}
1458 
1459 	/*
1460 	 * 5) random mac address
1461 	 */
1462 	if (!is_valid_ether_addr(iap)) {
1463 		/* Report it and use a random ethernet address instead */
1464 		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
1465 		eth_hw_addr_random(ndev);
1466 		netdev_info(ndev, "Using random MAC address: %pM\n",
1467 			    ndev->dev_addr);
1468 		return;
1469 	}
1470 
1471 	memcpy(ndev->dev_addr, iap, ETH_ALEN);
1472 
1473 	/* Adjust MAC if using macaddr */
1474 	if (iap == macaddr)
1475 		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
1476 }
1477 
1478 /* ------------------------------------------------------------------------- */
1479 
1480 /*
1481  * Phy section
1482  */
1483 static void fec_enet_adjust_link(struct net_device *ndev)
1484 {
1485 	struct fec_enet_private *fep = netdev_priv(ndev);
1486 	struct phy_device *phy_dev = fep->phy_dev;
1487 	int status_change = 0;
1488 
1489 	/* If an MDIO timeout left the PHY state machine halted, resume it */
1490 	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
1491 		phy_dev->state = PHY_RESUMING;
1492 		return;
1493 	}
1494 
1495 	if (phy_dev->link) {
1496 		if (!fep->link) {
1497 			fep->link = phy_dev->link;
1498 			status_change = 1;
1499 		}
1500 
1501 		if (fep->full_duplex != phy_dev->duplex)
1502 			status_change = 1;
1503 
1504 		if (phy_dev->speed != fep->speed) {
1505 			fep->speed = phy_dev->speed;
1506 			status_change = 1;
1507 		}
1508 
1509 		/* if any of the above changed restart the FEC */
1510 		if (status_change)
1511 			fec_restart(ndev, phy_dev->duplex);
1512 	} else {
1513 		if (fep->link) {
1514 			fec_stop(ndev);
1515 			fep->link = phy_dev->link;
1516 			status_change = 1;
1517 		}
1518 	}
1519 
1520 	if (status_change)
1521 		phy_print_status(phy_dev);
1522 }
1523 
1524 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1525 {
1526 	struct fec_enet_private *fep = bus->priv;
1527 	unsigned long time_left;
1528 
1529 	fep->mii_timeout = 0;
1530 	init_completion(&fep->mdio_done);
1531 
1532 	/* start a read op */
1533 	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1534 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1535 		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1536 
1537 	/* wait for end of transfer */
1538 	time_left = wait_for_completion_timeout(&fep->mdio_done,
1539 			usecs_to_jiffies(FEC_MII_TIMEOUT));
1540 	if (time_left == 0) {
1541 		fep->mii_timeout = 1;
1542 		netdev_err(fep->netdev, "MDIO read timeout\n");
1543 		return -ETIMEDOUT;
1544 	}
1545 
1546 	/* return value */
1547 	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1548 }
1549 
1550 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1551 			   u16 value)
1552 {
1553 	struct fec_enet_private *fep = bus->priv;
1554 	unsigned long time_left;
1555 
1556 	fep->mii_timeout = 0;
1557 	init_completion(&fep->mdio_done);
1558 
1559 	/* start a write op */
1560 	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1561 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1562 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
1563 		fep->hwp + FEC_MII_DATA);
1564 
1565 	/* wait for end of transfer */
1566 	time_left = wait_for_completion_timeout(&fep->mdio_done,
1567 			usecs_to_jiffies(FEC_MII_TIMEOUT));
1568 	if (time_left == 0) {
1569 		fep->mii_timeout = 1;
1570 		netdev_err(fep->netdev, "MDIO write timeout\n");
1571 		return -ETIMEDOUT;
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1578 {
1579 	struct fec_enet_private *fep = netdev_priv(ndev);
1580 	int ret;
1581 
1582 	if (enable) {
1583 		ret = clk_prepare_enable(fep->clk_ahb);
1584 		if (ret)
1585 			return ret;
1586 		ret = clk_prepare_enable(fep->clk_ipg);
1587 		if (ret)
1588 			goto failed_clk_ipg;
1589 		if (fep->clk_enet_out) {
1590 			ret = clk_prepare_enable(fep->clk_enet_out);
1591 			if (ret)
1592 				goto failed_clk_enet_out;
1593 		}
1594 		if (fep->clk_ptp) {
1595 			ret = clk_prepare_enable(fep->clk_ptp);
1596 			if (ret)
1597 				goto failed_clk_ptp;
1598 		}
1599 	} else {
1600 		clk_disable_unprepare(fep->clk_ahb);
1601 		clk_disable_unprepare(fep->clk_ipg);
1602 		if (fep->clk_enet_out)
1603 			clk_disable_unprepare(fep->clk_enet_out);
1604 		if (fep->clk_ptp)
1605 			clk_disable_unprepare(fep->clk_ptp);
1606 	}
1607 
1608 	return 0;
1609 failed_clk_ptp:
1610 	if (fep->clk_enet_out)
1611 		clk_disable_unprepare(fep->clk_enet_out);
1612 failed_clk_enet_out:
1613 	clk_disable_unprepare(fep->clk_ipg);
1614 failed_clk_ipg:
1615 	clk_disable_unprepare(fep->clk_ahb);
1616 
1617 	return ret;
1618 }
1619 
1620 static int fec_enet_mii_probe(struct net_device *ndev)
1621 {
1622 	struct fec_enet_private *fep = netdev_priv(ndev);
1623 	const struct platform_device_id *id_entry =
1624 				platform_get_device_id(fep->pdev);
1625 	struct phy_device *phy_dev = NULL;
1626 	char mdio_bus_id[MII_BUS_ID_SIZE];
1627 	char phy_name[MII_BUS_ID_SIZE + 3];
1628 	int phy_id;
1629 	int dev_id = fep->dev_id;
1630 
1631 	fep->phy_dev = NULL;
1632 
1633 	/* check for attached phy */
1634 	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1635 		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
1636 			continue;
1637 		if (fep->mii_bus->phy_map[phy_id] == NULL)
1638 			continue;
1639 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
1640 			continue;
1641 		if (dev_id--)
1642 			continue;
1643 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
1644 		break;
1645 	}
1646 
1647 	if (phy_id >= PHY_MAX_ADDR) {
1648 		netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1649 		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1650 		phy_id = 0;
1651 	}
1652 
1653 	snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
1654 	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1655 			      fep->phy_interface);
1656 	if (IS_ERR(phy_dev)) {
1657 		netdev_err(ndev, "could not attach to PHY\n");
1658 		return PTR_ERR(phy_dev);
1659 	}
1660 
1661 	/* mask with MAC supported features */
1662 	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
1663 		phy_dev->supported &= PHY_GBIT_FEATURES;
1664 #if !defined(CONFIG_M5272)
1665 		phy_dev->supported |= SUPPORTED_Pause;
1666 #endif
1667 	} else {
1668 		phy_dev->supported &= PHY_BASIC_FEATURES;
1669 	}
1670 
1671 	phy_dev->advertising = phy_dev->supported;
1672 
1673 	fep->phy_dev = phy_dev;
1674 	fep->link = 0;
1675 	fep->full_duplex = 0;
1676 
1677 	netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1678 		    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1679 		    fep->phy_dev->irq);
1680 
1681 	return 0;
1682 }
1683 
1684 static int fec_enet_mii_init(struct platform_device *pdev)
1685 {
1686 	static struct mii_bus *fec0_mii_bus;
1687 	struct net_device *ndev = platform_get_drvdata(pdev);
1688 	struct fec_enet_private *fep = netdev_priv(ndev);
1689 	const struct platform_device_id *id_entry =
1690 				platform_get_device_id(fep->pdev);
1691 	int err = -ENXIO, i;
1692 
1693 	/*
1694 	 * The dual fec interfaces are not equivalent with enet-mac.
1695 	 * Here are the differences:
1696 	 *
1697 	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
1698 	 *  - fec0 acts as the 1588 time master while fec1 is slave
1699 	 *  - external phys can only be configured by fec0
1700 	 *
1701 	 * That is to say, fec1 cannot work independently. It only works
1702 	 * when fec0 is working. The reason behind this design is that the
1703 	 * second interface is added primarily for Switch mode.
1704 	 *
1705 	 * Because of the last point above, both phys are attached on fec0
1706 	 * mdio interface in board design, and need to be configured by
1707 	 * fec0 mii_bus.
1708 	 */
1709 	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1710 		/* fec1 uses fec0 mii_bus */
1711 		if (mii_cnt && fec0_mii_bus) {
1712 			fep->mii_bus = fec0_mii_bus;
1713 			mii_cnt++;
1714 			return 0;
1715 		}
1716 		return -ENOENT;
1717 	}
1718 
1719 	fep->mii_timeout = 0;
1720 
1721 	/*
1722 	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
1723 	 *
1724 	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
1725 	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
1726 	 * Reference Manual gets this wrong; the i.MX6Q documentation
1727 	 * corrects it.
1728 	 */
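	/*
	 * Worked example, assuming a 66 MHz ipg clock on an ENET-MAC
	 * controller: DIV_ROUND_UP(66000000, 5000000) = 14, minus one
	 * gives a MII_SPEED value of 13, so MDC = 66 MHz / ((13 + 1) * 2)
	 * ~= 2.36 MHz, just below the 2.5 MHz target.  The shift below
	 * aligns the value with the register's MII_SPEED field.
	 */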
1729 	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1730 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1731 		fep->phy_speed--;
1732 	fep->phy_speed <<= 1;
1733 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1734 
1735 	fep->mii_bus = mdiobus_alloc();
1736 	if (fep->mii_bus == NULL) {
1737 		err = -ENOMEM;
1738 		goto err_out;
1739 	}
1740 
1741 	fep->mii_bus->name = "fec_enet_mii_bus";
1742 	fep->mii_bus->read = fec_enet_mdio_read;
1743 	fep->mii_bus->write = fec_enet_mdio_write;
1744 	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1745 		pdev->name, fep->dev_id + 1);
1746 	fep->mii_bus->priv = fep;
1747 	fep->mii_bus->parent = &pdev->dev;
1748 
1749 	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1750 	if (!fep->mii_bus->irq) {
1751 		err = -ENOMEM;
1752 		goto err_out_free_mdiobus;
1753 	}
1754 
1755 	for (i = 0; i < PHY_MAX_ADDR; i++)
1756 		fep->mii_bus->irq[i] = PHY_POLL;
1757 
1758 	if (mdiobus_register(fep->mii_bus))
1759 		goto err_out_free_mdio_irq;
1760 
1761 	mii_cnt++;
1762 
1763 	/* save fec0 mii_bus */
1764 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1765 		fec0_mii_bus = fep->mii_bus;
1766 
1767 	return 0;
1768 
1769 err_out_free_mdio_irq:
1770 	kfree(fep->mii_bus->irq);
1771 err_out_free_mdiobus:
1772 	mdiobus_free(fep->mii_bus);
1773 err_out:
1774 	return err;
1775 }
1776 
1777 static void fec_enet_mii_remove(struct fec_enet_private *fep)
1778 {
1779 	if (--mii_cnt == 0) {
1780 		mdiobus_unregister(fep->mii_bus);
1781 		kfree(fep->mii_bus->irq);
1782 		mdiobus_free(fep->mii_bus);
1783 	}
1784 }
1785 
1786 static int fec_enet_get_settings(struct net_device *ndev,
1787 				  struct ethtool_cmd *cmd)
1788 {
1789 	struct fec_enet_private *fep = netdev_priv(ndev);
1790 	struct phy_device *phydev = fep->phy_dev;
1791 
1792 	if (!phydev)
1793 		return -ENODEV;
1794 
1795 	return phy_ethtool_gset(phydev, cmd);
1796 }
1797 
1798 static int fec_enet_set_settings(struct net_device *ndev,
1799 				 struct ethtool_cmd *cmd)
1800 {
1801 	struct fec_enet_private *fep = netdev_priv(ndev);
1802 	struct phy_device *phydev = fep->phy_dev;
1803 
1804 	if (!phydev)
1805 		return -ENODEV;
1806 
1807 	return phy_ethtool_sset(phydev, cmd);
1808 }
1809 
1810 static void fec_enet_get_drvinfo(struct net_device *ndev,
1811 				 struct ethtool_drvinfo *info)
1812 {
1813 	struct fec_enet_private *fep = netdev_priv(ndev);
1814 
1815 	strlcpy(info->driver, fep->pdev->dev.driver->name,
1816 		sizeof(info->driver));
1817 	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
1818 	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
1819 }
1820 
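/*
 * Report timestamping capabilities, as queried from userspace with e.g.
 * "ethtool -T ethX".  Hardware timestamping is only advertised when the
 * controller provides extended buffer descriptors.
 */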
1821 static int fec_enet_get_ts_info(struct net_device *ndev,
1822 				struct ethtool_ts_info *info)
1823 {
1824 	struct fec_enet_private *fep = netdev_priv(ndev);
1825 
1826 	if (fep->bufdesc_ex) {
1827 
1828 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1829 					SOF_TIMESTAMPING_RX_SOFTWARE |
1830 					SOF_TIMESTAMPING_SOFTWARE |
1831 					SOF_TIMESTAMPING_TX_HARDWARE |
1832 					SOF_TIMESTAMPING_RX_HARDWARE |
1833 					SOF_TIMESTAMPING_RAW_HARDWARE;
1834 		if (fep->ptp_clock)
1835 			info->phc_index = ptp_clock_index(fep->ptp_clock);
1836 		else
1837 			info->phc_index = -1;
1838 
1839 		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1840 				 (1 << HWTSTAMP_TX_ON);
1841 
1842 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1843 				   (1 << HWTSTAMP_FILTER_ALL);
1844 		return 0;
1845 	} else {
1846 		return ethtool_op_get_ts_info(ndev, info);
1847 	}
1848 }
1849 
1850 #if !defined(CONFIG_M5272)
1851 
1852 static void fec_enet_get_pauseparam(struct net_device *ndev,
1853 				    struct ethtool_pauseparam *pause)
1854 {
1855 	struct fec_enet_private *fep = netdev_priv(ndev);
1856 
1857 	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
1858 	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
1859 	pause->rx_pause = pause->tx_pause;
1860 }
1861 
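/*
 * Configure pause frames, as requested from userspace with e.g.
 * "ethtool -A ethX autoneg on rx on tx on".  The controller can only
 * enable or disable pause in both directions at once.
 */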
1862 static int fec_enet_set_pauseparam(struct net_device *ndev,
1863 				   struct ethtool_pauseparam *pause)
1864 {
1865 	struct fec_enet_private *fep = netdev_priv(ndev);
1866 
1867 	if (pause->tx_pause != pause->rx_pause) {
1868 		netdev_info(ndev,
			"hardware only supports enabling/disabling both tx and rx\n");
1870 		return -EINVAL;
1871 	}
1872 
1873 	fep->pause_flag = 0;
1874 
1875 	/* tx pause must be same as rx pause */
1876 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
1877 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1878 
1879 	if (pause->rx_pause || pause->autoneg) {
1880 		fep->phy_dev->supported |= ADVERTISED_Pause;
1881 		fep->phy_dev->advertising |= ADVERTISED_Pause;
1882 	} else {
1883 		fep->phy_dev->supported &= ~ADVERTISED_Pause;
1884 		fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1885 	}
1886 
1887 	if (pause->autoneg) {
1888 		if (netif_running(ndev))
1889 			fec_stop(ndev);
1890 		phy_start_aneg(fep->phy_dev);
1891 	}
1892 	if (netif_running(ndev))
1893 		fec_restart(ndev, 0);
1894 
1895 	return 0;
1896 }
1897 
1898 static const struct fec_stat {
1899 	char name[ETH_GSTRING_LEN];
1900 	u16 offset;
1901 } fec_stats[] = {
1902 	/* RMON TX */
1903 	{ "tx_dropped", RMON_T_DROP },
1904 	{ "tx_packets", RMON_T_PACKETS },
1905 	{ "tx_broadcast", RMON_T_BC_PKT },
1906 	{ "tx_multicast", RMON_T_MC_PKT },
1907 	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
1908 	{ "tx_undersize", RMON_T_UNDERSIZE },
1909 	{ "tx_oversize", RMON_T_OVERSIZE },
1910 	{ "tx_fragment", RMON_T_FRAG },
1911 	{ "tx_jabber", RMON_T_JAB },
1912 	{ "tx_collision", RMON_T_COL },
1913 	{ "tx_64byte", RMON_T_P64 },
1914 	{ "tx_65to127byte", RMON_T_P65TO127 },
1915 	{ "tx_128to255byte", RMON_T_P128TO255 },
1916 	{ "tx_256to511byte", RMON_T_P256TO511 },
1917 	{ "tx_512to1023byte", RMON_T_P512TO1023 },
1918 	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
1919 	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
1920 	{ "tx_octets", RMON_T_OCTETS },
1921 
1922 	/* IEEE TX */
1923 	{ "IEEE_tx_drop", IEEE_T_DROP },
1924 	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
1925 	{ "IEEE_tx_1col", IEEE_T_1COL },
1926 	{ "IEEE_tx_mcol", IEEE_T_MCOL },
1927 	{ "IEEE_tx_def", IEEE_T_DEF },
1928 	{ "IEEE_tx_lcol", IEEE_T_LCOL },
1929 	{ "IEEE_tx_excol", IEEE_T_EXCOL },
1930 	{ "IEEE_tx_macerr", IEEE_T_MACERR },
1931 	{ "IEEE_tx_cserr", IEEE_T_CSERR },
1932 	{ "IEEE_tx_sqe", IEEE_T_SQE },
1933 	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
1934 	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
1935 
1936 	/* RMON RX */
1937 	{ "rx_packets", RMON_R_PACKETS },
1938 	{ "rx_broadcast", RMON_R_BC_PKT },
1939 	{ "rx_multicast", RMON_R_MC_PKT },
1940 	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
1941 	{ "rx_undersize", RMON_R_UNDERSIZE },
1942 	{ "rx_oversize", RMON_R_OVERSIZE },
1943 	{ "rx_fragment", RMON_R_FRAG },
1944 	{ "rx_jabber", RMON_R_JAB },
1945 	{ "rx_64byte", RMON_R_P64 },
1946 	{ "rx_65to127byte", RMON_R_P65TO127 },
1947 	{ "rx_128to255byte", RMON_R_P128TO255 },
1948 	{ "rx_256to511byte", RMON_R_P256TO511 },
1949 	{ "rx_512to1023byte", RMON_R_P512TO1023 },
1950 	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
1951 	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
1952 	{ "rx_octets", RMON_R_OCTETS },
1953 
1954 	/* IEEE RX */
1955 	{ "IEEE_rx_drop", IEEE_R_DROP },
1956 	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
1957 	{ "IEEE_rx_crc", IEEE_R_CRC },
1958 	{ "IEEE_rx_align", IEEE_R_ALIGN },
1959 	{ "IEEE_rx_macerr", IEEE_R_MACERR },
1960 	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
1961 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1962 };
1963 
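/*
 * Fill the counters reported by "ethtool -S ethX".  Each value is read
 * straight from its RMON/IEEE statistics register, so no software
 * accumulation is required.
 */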
1964 static void fec_enet_get_ethtool_stats(struct net_device *dev,
1965 	struct ethtool_stats *stats, u64 *data)
1966 {
1967 	struct fec_enet_private *fep = netdev_priv(dev);
1968 	int i;
1969 
1970 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1971 		data[i] = readl(fep->hwp + fec_stats[i].offset);
1972 }
1973 
1974 static void fec_enet_get_strings(struct net_device *netdev,
1975 	u32 stringset, u8 *data)
1976 {
1977 	int i;
1978 	switch (stringset) {
1979 	case ETH_SS_STATS:
1980 		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1981 			memcpy(data + i * ETH_GSTRING_LEN,
1982 				fec_stats[i].name, ETH_GSTRING_LEN);
1983 		break;
1984 	}
1985 }
1986 
1987 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1988 {
1989 	switch (sset) {
1990 	case ETH_SS_STATS:
1991 		return ARRAY_SIZE(fec_stats);
1992 	default:
1993 		return -EOPNOTSUPP;
1994 	}
1995 }
1996 #endif /* !defined(CONFIG_M5272) */
1997 
1998 static int fec_enet_nway_reset(struct net_device *dev)
1999 {
2000 	struct fec_enet_private *fep = netdev_priv(dev);
2001 	struct phy_device *phydev = fep->phy_dev;
2002 
2003 	if (!phydev)
2004 		return -ENODEV;
2005 
2006 	return genphy_restart_aneg(phydev);
2007 }
2008 
2009 static const struct ethtool_ops fec_enet_ethtool_ops = {
2010 #if !defined(CONFIG_M5272)
2011 	.get_pauseparam		= fec_enet_get_pauseparam,
2012 	.set_pauseparam		= fec_enet_set_pauseparam,
2013 #endif
2014 	.get_settings		= fec_enet_get_settings,
2015 	.set_settings		= fec_enet_set_settings,
2016 	.get_drvinfo		= fec_enet_get_drvinfo,
2017 	.get_link		= ethtool_op_get_link,
2018 	.get_ts_info		= fec_enet_get_ts_info,
2019 	.nway_reset		= fec_enet_nway_reset,
2020 #ifndef CONFIG_M5272
2021 	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
2022 	.get_strings		= fec_enet_get_strings,
2023 	.get_sset_count		= fec_enet_get_sset_count,
2024 #endif
2025 };
2026 
2027 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2028 {
2029 	struct fec_enet_private *fep = netdev_priv(ndev);
2030 	struct phy_device *phydev = fep->phy_dev;
2031 
2032 	if (!netif_running(ndev))
2033 		return -EINVAL;
2034 
2035 	if (!phydev)
2036 		return -ENODEV;
2037 
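	/* Hardware timestamping requests (issued by e.g. ptp4l via
	 * SIOCSHWTSTAMP/SIOCGHWTSTAMP) are only honoured when extended
	 * buffer descriptors are available.
	 */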
2038 	if (fep->bufdesc_ex) {
2039 		if (cmd == SIOCSHWTSTAMP)
2040 			return fec_ptp_set(ndev, rq);
2041 		if (cmd == SIOCGHWTSTAMP)
2042 			return fec_ptp_get(ndev, rq);
2043 	}
2044 
2045 	return phy_mii_ioctl(phydev, rq, cmd);
2046 }
2047 
2048 static void fec_enet_free_buffers(struct net_device *ndev)
2049 {
2050 	struct fec_enet_private *fep = netdev_priv(ndev);
2051 	unsigned int i;
2052 	struct sk_buff *skb;
2053 	struct bufdesc	*bdp;
2054 
2055 	bdp = fep->rx_bd_base;
2056 	for (i = 0; i < fep->rx_ring_size; i++) {
2057 		skb = fep->rx_skbuff[i];
2058 
2059 		if (bdp->cbd_bufaddr)
2060 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
2061 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
2062 		if (skb)
2063 			dev_kfree_skb(skb);
2064 		bdp = fec_enet_get_nextdesc(bdp, fep);
2065 	}
2066 
2067 	bdp = fep->tx_bd_base;
2068 	for (i = 0; i < fep->tx_ring_size; i++)
2069 		kfree(fep->tx_bounce[i]);
2070 }
2071 
2072 static int fec_enet_alloc_buffers(struct net_device *ndev)
2073 {
2074 	struct fec_enet_private *fep = netdev_priv(ndev);
2075 	unsigned int i;
2076 	struct sk_buff *skb;
2077 	struct bufdesc	*bdp;
2078 
2079 	bdp = fep->rx_bd_base;
2080 	for (i = 0; i < fep->rx_ring_size; i++) {
2081 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2082 		if (!skb) {
2083 			fec_enet_free_buffers(ndev);
2084 			return -ENOMEM;
2085 		}
2086 		fep->rx_skbuff[i] = skb;
2087 
2088 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
2089 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
2090 		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
2091 			fec_enet_free_buffers(ndev);
2092 			if (net_ratelimit())
2093 				netdev_err(ndev, "Rx DMA memory map failed\n");
2094 			return -ENOMEM;
2095 		}
2096 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
2097 
2098 		if (fep->bufdesc_ex) {
2099 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2100 			ebdp->cbd_esc = BD_ENET_RX_INT;
2101 		}
2102 
2103 		bdp = fec_enet_get_nextdesc(bdp, fep);
2104 	}
2105 
2106 	/* Set the last buffer to wrap. */
2107 	bdp = fec_enet_get_prevdesc(bdp, fep);
2108 	bdp->cbd_sc |= BD_SC_WRAP;
2109 
2110 	bdp = fep->tx_bd_base;
2111 	for (i = 0; i < fep->tx_ring_size; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!fep->tx_bounce[i]) {
			fec_enet_free_buffers(ndev);
			return -ENOMEM;
		}

2114 		bdp->cbd_sc = 0;
2115 		bdp->cbd_bufaddr = 0;
2116 
2117 		if (fep->bufdesc_ex) {
2118 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2119 			ebdp->cbd_esc = BD_ENET_TX_INT;
2120 		}
2121 
2122 		bdp = fec_enet_get_nextdesc(bdp, fep);
2123 	}
2124 
2125 	/* Set the last buffer to wrap. */
2126 	bdp = fec_enet_get_prevdesc(bdp, fep);
2127 	bdp->cbd_sc |= BD_SC_WRAP;
2128 
2129 	return 0;
2130 }
2131 
2132 static int
2133 fec_enet_open(struct net_device *ndev)
2134 {
2135 	struct fec_enet_private *fep = netdev_priv(ndev);
2136 	int ret;
2137 
2138 	pinctrl_pm_select_default_state(&fep->pdev->dev);
2139 	ret = fec_enet_clk_enable(ndev, true);
2140 	if (ret)
2141 		return ret;
2142 
2143 	/* I should reset the ring buffers here, but I don't yet know
2144 	 * a simple way to do that.
2145 	 */
2146 
2147 	ret = fec_enet_alloc_buffers(ndev);
2148 	if (ret)
2149 		return ret;
2150 
	/* Probe and connect to the PHY when opening the interface */
2152 	ret = fec_enet_mii_probe(ndev);
2153 	if (ret) {
2154 		fec_enet_free_buffers(ndev);
2155 		return ret;
2156 	}
2157 
2158 	napi_enable(&fep->napi);
2159 	phy_start(fep->phy_dev);
2160 	netif_start_queue(ndev);
2161 	fep->opened = 1;
2162 	return 0;
2163 }
2164 
2165 static int
2166 fec_enet_close(struct net_device *ndev)
2167 {
2168 	struct fec_enet_private *fep = netdev_priv(ndev);
2169 
2170 	/* Don't know what to do yet. */
2171 	napi_disable(&fep->napi);
2172 	fep->opened = 0;
2173 	netif_stop_queue(ndev);
2174 	fec_stop(ndev);
2175 
2176 	if (fep->phy_dev) {
2177 		phy_stop(fep->phy_dev);
2178 		phy_disconnect(fep->phy_dev);
2179 	}
2180 
2181 	fec_enet_clk_enable(ndev, false);
2182 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2183 	fec_enet_free_buffers(ndev);
2184 
2185 	return 0;
2186 }
2187 
2188 /* Set or clear the multicast filter for this adaptor.
2189  * Skeleton taken from sunlance driver.
2190  * The CPM Ethernet implementation allows Multicast as well as individual
2191  * MAC address filtering.  Some of the drivers check to make sure it is
2192  * a group multicast address, and discard those that are not.  I guess I
2193  * will do the same for now, but just remove the test if you want
2194  * individual filtering as well (do the upper net layers want or support
2195  * this kind of feature?).
2196  */
2197 
2198 #define HASH_BITS	6		/* #bits in hash */
2199 #define CRC32_POLY	0xEDB88320
2200 
2201 static void set_multicast_list(struct net_device *ndev)
2202 {
2203 	struct fec_enet_private *fep = netdev_priv(ndev);
2204 	struct netdev_hw_addr *ha;
2205 	unsigned int i, bit, data, crc, tmp;
2206 	unsigned char hash;
2207 
2208 	if (ndev->flags & IFF_PROMISC) {
2209 		tmp = readl(fep->hwp + FEC_R_CNTRL);
2210 		tmp |= 0x8;
2211 		writel(tmp, fep->hwp + FEC_R_CNTRL);
2212 		return;
2213 	}
2214 
2215 	tmp = readl(fep->hwp + FEC_R_CNTRL);
2216 	tmp &= ~0x8;
2217 	writel(tmp, fep->hwp + FEC_R_CNTRL);
2218 
2219 	if (ndev->flags & IFF_ALLMULTI) {
2220 		/* Catch all multicast addresses, so set the
2221 		 * filter to all 1's
2222 		 */
2223 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2224 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2225 
2226 		return;
2227 	}
2228 
	/* Clear the filter and add the addresses to the hash registers */
2231 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2232 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2233 
2234 	netdev_for_each_mc_addr(ha, ndev) {
2235 		/* calculate crc32 value of mac address */
2236 		crc = 0xffffffff;
2237 
2238 		for (i = 0; i < ndev->addr_len; i++) {
2239 			data = ha->addr[i];
2240 			for (bit = 0; bit < 8; bit++, data >>= 1) {
2241 				crc = (crc >> 1) ^
2242 				(((crc ^ data) & 1) ? CRC32_POLY : 0);
2243 			}
2244 		}
2245 
		/* only the upper 6 bits (HASH_BITS) are used,
		 * which point to a specific bit in the hash registers
		 */
2249 		hash = (crc >> (32 - HASH_BITS)) & 0x3f;
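		/* e.g. crc = 0xfc000000 yields hash = 0x3f (63), which
		 * selects bit 31 of FEC_GRP_HASH_TABLE_HIGH below
		 */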
2250 
2251 		if (hash > 31) {
2252 			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2253 			tmp |= 1 << (hash - 32);
2254 			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2255 		} else {
2256 			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2257 			tmp |= 1 << hash;
2258 			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2259 		}
2260 	}
2261 }
2262 
2263 /* Set a MAC change in hardware. */
2264 static int
2265 fec_set_mac_address(struct net_device *ndev, void *p)
2266 {
2267 	struct fec_enet_private *fep = netdev_priv(ndev);
2268 	struct sockaddr *addr = p;
2269 
2270 	if (addr) {
2271 		if (!is_valid_ether_addr(addr->sa_data))
2272 			return -EADDRNOTAVAIL;
2273 		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
2274 	}
2275 
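	/* The address is packed big-endian into the two registers, e.g.
	 * 00:11:22:33:44:55 becomes FEC_ADDR_LOW = 0x00112233 and
	 * FEC_ADDR_HIGH = 0x44550000.
	 */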
2276 	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
2277 		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
2278 		fep->hwp + FEC_ADDR_LOW);
2279 	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
2280 		fep->hwp + FEC_ADDR_HIGH);
2281 	return 0;
2282 }
2283 
2284 #ifdef CONFIG_NET_POLL_CONTROLLER
2285 /**
2286  * fec_poll_controller - FEC Poll controller function
2287  * @dev: The FEC network adapter
2288  *
 * Polled functionality used by netconsole and others in non-interrupt mode
2290  *
2291  */
2292 static void fec_poll_controller(struct net_device *dev)
2293 {
2294 	int i;
2295 	struct fec_enet_private *fep = netdev_priv(dev);
2296 
2297 	for (i = 0; i < FEC_IRQ_NUM; i++) {
2298 		if (fep->irq[i] > 0) {
2299 			disable_irq(fep->irq[i]);
2300 			fec_enet_interrupt(fep->irq[i], dev);
2301 			enable_irq(fep->irq[i]);
2302 		}
2303 	}
2304 }
2305 #endif
2306 
2307 static int fec_set_features(struct net_device *netdev,
2308 	netdev_features_t features)
2309 {
2310 	struct fec_enet_private *fep = netdev_priv(netdev);
2311 	netdev_features_t changed = features ^ netdev->features;
2312 
2313 	netdev->features = features;
2314 
2315 	/* Receive checksum has been changed */
2316 	if (changed & NETIF_F_RXCSUM) {
2317 		if (features & NETIF_F_RXCSUM)
2318 			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2319 		else
2320 			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
2321 
2322 		if (netif_running(netdev)) {
2323 			fec_stop(netdev);
2324 			fec_restart(netdev, fep->phy_dev->duplex);
2325 			netif_wake_queue(netdev);
2326 		} else {
2327 			fec_restart(netdev, fep->phy_dev->duplex);
2328 		}
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static const struct net_device_ops fec_netdev_ops = {
2335 	.ndo_open		= fec_enet_open,
2336 	.ndo_stop		= fec_enet_close,
2337 	.ndo_start_xmit		= fec_enet_start_xmit,
2338 	.ndo_set_rx_mode	= set_multicast_list,
2339 	.ndo_change_mtu		= eth_change_mtu,
2340 	.ndo_validate_addr	= eth_validate_addr,
2341 	.ndo_tx_timeout		= fec_timeout,
2342 	.ndo_set_mac_address	= fec_set_mac_address,
2343 	.ndo_do_ioctl		= fec_enet_ioctl,
2344 #ifdef CONFIG_NET_POLL_CONTROLLER
2345 	.ndo_poll_controller	= fec_poll_controller,
2346 #endif
2347 	.ndo_set_features	= fec_set_features,
2348 };
2349 
/*
 * XXX: We need to clean up on failure exits here.
 */
2354 static int fec_enet_init(struct net_device *ndev)
2355 {
2356 	struct fec_enet_private *fep = netdev_priv(ndev);
2357 	const struct platform_device_id *id_entry =
2358 				platform_get_device_id(fep->pdev);
2359 	struct bufdesc *cbd_base;
2360 	int bd_size;
2361 
2362 	/* init the tx & rx ring size */
2363 	fep->tx_ring_size = TX_RING_SIZE;
2364 	fep->rx_ring_size = RX_RING_SIZE;
2365 
2366 	fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2367 	fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
2368 
2369 	if (fep->bufdesc_ex)
2370 		fep->bufdesc_size = sizeof(struct bufdesc_ex);
2371 	else
2372 		fep->bufdesc_size = sizeof(struct bufdesc);
2373 	bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
2374 			fep->bufdesc_size;
2375 
	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(&fep->pdev->dev, bd_size, &fep->bd_dma,
				      GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	fep->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					   fep->tx_ring_size * TSO_HEADER_SIZE,
					   &fep->tso_hdrs_dma, GFP_KERNEL);
	if (!fep->tso_hdrs) {
		dma_free_coherent(&fep->pdev->dev, bd_size, cbd_base,
				  fep->bd_dma);
		return -ENOMEM;
	}

	memset(cbd_base, 0, bd_size);
2390 
2391 	fep->netdev = ndev;
2392 
2393 	/* Get the Ethernet address */
2394 	fec_get_mac(ndev);
2395 	/* make sure MAC we just acquired is programmed into the hw */
2396 	fec_set_mac_address(ndev, NULL);
2397 
2398 	/* Set receive and transmit descriptor base. */
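	/*
	 * Both rings live back to back in the single coherent allocation:
	 *
	 *   cbd_base: [rx_ring_size descriptors][tx_ring_size descriptors]
	 *              ^ rx_bd_base              ^ tx_bd_base
	 */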
2399 	fep->rx_bd_base = cbd_base;
2400 	if (fep->bufdesc_ex)
2401 		fep->tx_bd_base = (struct bufdesc *)
2402 			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
2403 	else
2404 		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
2405 
2406 	/* The FEC Ethernet specific entries in the device structure */
2407 	ndev->watchdog_timeo = TX_TIMEOUT;
2408 	ndev->netdev_ops = &fec_netdev_ops;
2409 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
2410 
2411 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2412 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2413 
2414 	if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
2415 		/* enable hw VLAN support */
2416 		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2417 
2418 	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2419 		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
2420 
2421 		/* enable hw accelerator */
2422 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2423 				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
2424 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2425 	}
2426 
2427 	ndev->hw_features = ndev->features;
2428 
2429 	fec_restart(ndev, 0);
2430 
2431 	return 0;
2432 }
2433 
2434 #ifdef CONFIG_OF
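/*
 * Pull the PHY reset GPIO and hold time from the device tree.  A board
 * might describe it as, for example (hypothetical snippet):
 *
 *	phy-reset-gpios = <&gpio1 2 0>;
 *	phy-reset-duration = <10>;	(hold reset low for 10 ms)
 */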
2435 static void fec_reset_phy(struct platform_device *pdev)
2436 {
2437 	int err, phy_reset;
2438 	int msec = 1;
2439 	struct device_node *np = pdev->dev.of_node;
2440 
2441 	if (!np)
2442 		return;
2443 
2444 	of_property_read_u32(np, "phy-reset-duration", &msec);
2445 	/* A sane reset duration should not be longer than 1s */
2446 	if (msec > 1000)
2447 		msec = 1;
2448 
2449 	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2450 	if (!gpio_is_valid(phy_reset))
2451 		return;
2452 
2453 	err = devm_gpio_request_one(&pdev->dev, phy_reset,
2454 				    GPIOF_OUT_INIT_LOW, "phy-reset");
2455 	if (err) {
2456 		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
2457 		return;
2458 	}
2459 	msleep(msec);
2460 	gpio_set_value(phy_reset, 1);
2461 }
2462 #else /* CONFIG_OF */
2463 static void fec_reset_phy(struct platform_device *pdev)
2464 {
	/*
	 * In the non-DT (platform) probe case, the PHY reset has already
	 * been done by the machine code.
	 */
2469 }
2470 #endif /* CONFIG_OF */
2471 
2472 static int
2473 fec_probe(struct platform_device *pdev)
2474 {
2475 	struct fec_enet_private *fep;
2476 	struct fec_platform_data *pdata;
2477 	struct net_device *ndev;
2478 	int i, irq, ret = 0;
2479 	struct resource *r;
2480 	const struct of_device_id *of_id;
2481 	static int dev_id;
2482 
2483 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
2484 	if (of_id)
2485 		pdev->id_entry = of_id->data;
2486 
2487 	/* Init network device */
2488 	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
2489 	if (!ndev)
2490 		return -ENOMEM;
2491 
2492 	SET_NETDEV_DEV(ndev, &pdev->dev);
2493 
2494 	/* setup board info structure */
2495 	fep = netdev_priv(ndev);
2496 
2497 #if !defined(CONFIG_M5272)
	/* Enable pause frame autonegotiation by default */
2499 	if (pdev->id_entry &&
2500 	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
2501 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2502 #endif
2503 
2504 	/* Select default pin state */
2505 	pinctrl_pm_select_default_state(&pdev->dev);
2506 
2507 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2508 	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2509 	if (IS_ERR(fep->hwp)) {
2510 		ret = PTR_ERR(fep->hwp);
2511 		goto failed_ioremap;
2512 	}
2513 
2514 	fep->pdev = pdev;
2515 	fep->dev_id = dev_id++;
2516 
2517 	fep->bufdesc_ex = 0;
2518 
2519 	platform_set_drvdata(pdev, ndev);
2520 
2521 	ret = of_get_phy_mode(pdev->dev.of_node);
2522 	if (ret < 0) {
2523 		pdata = dev_get_platdata(&pdev->dev);
2524 		if (pdata)
2525 			fep->phy_interface = pdata->phy;
2526 		else
2527 			fep->phy_interface = PHY_INTERFACE_MODE_MII;
2528 	} else {
2529 		fep->phy_interface = ret;
2530 	}
2531 
2532 	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2533 	if (IS_ERR(fep->clk_ipg)) {
2534 		ret = PTR_ERR(fep->clk_ipg);
2535 		goto failed_clk;
2536 	}
2537 
2538 	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2539 	if (IS_ERR(fep->clk_ahb)) {
2540 		ret = PTR_ERR(fep->clk_ahb);
2541 		goto failed_clk;
2542 	}
2543 
2544 	/* enet_out is optional, depends on board */
2545 	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
2546 	if (IS_ERR(fep->clk_enet_out))
2547 		fep->clk_enet_out = NULL;
2548 
2549 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
2550 	fep->bufdesc_ex =
2551 		pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
2552 	if (IS_ERR(fep->clk_ptp)) {
2553 		fep->clk_ptp = NULL;
2554 		fep->bufdesc_ex = 0;
2555 	}
2556 
2557 	ret = fec_enet_clk_enable(ndev, true);
2558 	if (ret)
2559 		goto failed_clk;
2560 
2561 	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2562 	if (!IS_ERR(fep->reg_phy)) {
2563 		ret = regulator_enable(fep->reg_phy);
2564 		if (ret) {
2565 			dev_err(&pdev->dev,
2566 				"Failed to enable phy regulator: %d\n", ret);
2567 			goto failed_regulator;
2568 		}
2569 	} else {
2570 		fep->reg_phy = NULL;
2571 	}
2572 
2573 	fec_reset_phy(pdev);
2574 
2575 	if (fep->bufdesc_ex)
2576 		fec_ptp_init(pdev);
2577 
2578 	ret = fec_enet_init(ndev);
2579 	if (ret)
2580 		goto failed_init;
2581 
2582 	for (i = 0; i < FEC_IRQ_NUM; i++) {
2583 		irq = platform_get_irq(pdev, i);
2584 		if (irq < 0) {
2585 			if (i)
2586 				break;
2587 			ret = irq;
2588 			goto failed_irq;
2589 		}
2590 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
2591 				       0, pdev->name, ndev);
2592 		if (ret)
2593 			goto failed_irq;
2594 	}
2595 
2596 	ret = fec_enet_mii_init(pdev);
2597 	if (ret)
2598 		goto failed_mii_init;
2599 
2600 	/* Carrier starts down, phylib will bring it up */
2601 	netif_carrier_off(ndev);
2602 	fec_enet_clk_enable(ndev, false);
2603 	pinctrl_pm_select_sleep_state(&pdev->dev);
2604 
2605 	ret = register_netdev(ndev);
2606 	if (ret)
2607 		goto failed_register;
2608 
2609 	if (fep->bufdesc_ex && fep->ptp_clock)
2610 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
2611 
2612 	INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
2613 	return 0;
2614 
2615 failed_register:
2616 	fec_enet_mii_remove(fep);
2617 failed_mii_init:
2618 failed_irq:
2619 failed_init:
2620 	if (fep->reg_phy)
2621 		regulator_disable(fep->reg_phy);
2622 failed_regulator:
2623 	fec_enet_clk_enable(ndev, false);
2624 failed_clk:
2625 failed_ioremap:
2626 	free_netdev(ndev);
2627 
2628 	return ret;
2629 }
2630 
2631 static int
2632 fec_drv_remove(struct platform_device *pdev)
2633 {
2634 	struct net_device *ndev = platform_get_drvdata(pdev);
2635 	struct fec_enet_private *fep = netdev_priv(ndev);
2636 
2637 	cancel_delayed_work_sync(&(fep->delay_work.delay_work));
2638 	unregister_netdev(ndev);
2639 	fec_enet_mii_remove(fep);
2640 	del_timer_sync(&fep->time_keep);
2641 	if (fep->reg_phy)
2642 		regulator_disable(fep->reg_phy);
2643 	if (fep->ptp_clock)
2644 		ptp_clock_unregister(fep->ptp_clock);
2645 	fec_enet_clk_enable(ndev, false);
2646 	free_netdev(ndev);
2647 
2648 	return 0;
2649 }
2650 
2651 #ifdef CONFIG_PM_SLEEP
2652 static int
2653 fec_suspend(struct device *dev)
2654 {
2655 	struct net_device *ndev = dev_get_drvdata(dev);
2656 	struct fec_enet_private *fep = netdev_priv(ndev);
2657 
2658 	if (netif_running(ndev)) {
2659 		fec_stop(ndev);
2660 		netif_device_detach(ndev);
2661 	}
2662 	fec_enet_clk_enable(ndev, false);
2663 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2664 
2665 	if (fep->reg_phy)
2666 		regulator_disable(fep->reg_phy);
2667 
2668 	return 0;
2669 }
2670 
2671 static int
2672 fec_resume(struct device *dev)
2673 {
2674 	struct net_device *ndev = dev_get_drvdata(dev);
2675 	struct fec_enet_private *fep = netdev_priv(ndev);
2676 	int ret;
2677 
2678 	if (fep->reg_phy) {
2679 		ret = regulator_enable(fep->reg_phy);
2680 		if (ret)
2681 			return ret;
2682 	}
2683 
2684 	pinctrl_pm_select_default_state(&fep->pdev->dev);
2685 	ret = fec_enet_clk_enable(ndev, true);
2686 	if (ret)
2687 		goto failed_clk;
2688 
2689 	if (netif_running(ndev)) {
2690 		fec_restart(ndev, fep->full_duplex);
2691 		netif_device_attach(ndev);
2692 	}
2693 
2694 	return 0;
2695 
2696 failed_clk:
2697 	if (fep->reg_phy)
2698 		regulator_disable(fep->reg_phy);
2699 	return ret;
2700 }
2701 #endif /* CONFIG_PM_SLEEP */
2702 
2703 static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
2704 
2705 static struct platform_driver fec_driver = {
2706 	.driver	= {
2707 		.name	= DRIVER_NAME,
2708 		.owner	= THIS_MODULE,
2709 		.pm	= &fec_pm_ops,
2710 		.of_match_table = fec_dt_ids,
2711 	},
2712 	.id_table = fec_devtype,
2713 	.probe	= fec_probe,
2714 	.remove	= fec_drv_remove,
2715 };
2716 
2717 module_platform_driver(fec_driver);
2718 
2719 MODULE_ALIAS("platform:"DRIVER_NAME);
2720 MODULE_LICENSE("GPL");
2721