// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */
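
/* For illustration only (not driver code): a hedged sketch of the per-BD
 * bookkeeping that fully API-correct unmapping, as described above, would
 * require.  All names in this sketch are hypothetical.
 */
#if 0
struct emac_bd_unmap_state {
	dma_addr_t	addr;		/* handle from dma_map_*()             */
	size_t		len;		/* mapped length                       */
	bool		is_page;	/* dma_map_page() vs dma_map_single()  */
};

static void emac_unmap_bd(struct device *dev,
			  const struct emac_bd_unmap_state *st,
			  enum dma_data_direction dir)
{
	/* Issue the dma_unmap_*() variant matching how the BD was mapped */
	if (st->is_page)
		dma_unmap_page(dev, st->addr, st->len, dir);
	else
		dma_unmap_single(dev, st->addr, st->len, dir);
}
#endif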

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of pushing the original, larger skb
 * up the stack.
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs), though we can
 * probably require in that case to have explicit PHY IDs in the device-tree.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
}

/* EMAC PHY clock workaround:
 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
 * which allows each EMAC clock to be controlled individually.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
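
/* The values above appear to correspond to one maximum-length frame time
 * on the wire: e.g. 1518 bytes of frame plus 8 bytes of preamble/SFD plus
 * a 12-byte inter-frame gap is 1538 * 8 = 12304 bit times, i.e. ~1230 us
 * at 10 Mb/s, ~124 us at 100 Mb/s and ~13 us at 1 Gb/s; a ~9000-byte MTU
 * jumbo frame works out to ~73 us at 1 Gb/s.
 */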
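
/* 01:80:C2:00:00:01 is the IEEE 802.3x flow-control (PAUSE) destination
 * address; emac_open() subscribes to it so that PAUSE frames are received.
 */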
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_interface_mode_is_rgmii(phy_mode) ||
		phy_mode == PHY_INTERFACE_MODE_GMII ||
		phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* A 40x erratum suggests stopping the RX channel before a
		 * reset; we stop TX as well.
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if
	 * the ethernet cable is not attached. This causes the reset to time
	 * out, and the PHY detection code in emac_init_phy() is then unable
	 * to communicate with and detect the AR8035-A PHY. As a result, the
	 * emac driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock after the
	 * first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
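
/* For illustration only (not driver code): how one multicast address is
 * folded into the hash table above.  The real slot extraction is done by
 * the EMAC_XAHT_* macros from core.h and depends on the table width of
 * the particular core; the 64-slot, big-endian-bit layout below is an
 * assumption made just for this example.
 */
#if 0
static void example_hash_one(u32 gaht[2], const u8 *mc_addr)
{
	u32 crc = ether_crc(ETH_ALEN, mc_addr);
	int slot = crc >> 26;			/* top 6 CRC bits -> 0..63  */
	int reg  = slot >> 5;			/* which 32-bit register    */
	u32 mask = 1u << (31 - (slot & 0x1f));	/* big-endian bit numbering */

	gaht[reg] |= mask;
}
#endif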

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		r &= ~EMAC4_RMR_MJS_MASK;
		r |= EMAC4_RMR_MJS(ndev->mtu);
	}

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_TFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_RFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
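
/* Worked example, derived from the two helpers above: on EMAC4, a call to
 * emac_calc_trtr(dev, 1024) encodes the TX request threshold in 64-byte
 * units minus one, i.e. (1024 >> 6) - 1 = 15, shifted to the EMAC4 field
 * position.  Similarly, emac_calc_rwmr() packs the low/high RX FIFO water
 * marks (counted in FIFO entries) into bits 22+ and 6+ of RWMR on EMAC4.
 */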

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us NOT to use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* A PAUSE frame is sent when the RX FIFO reaches its high-water
	   mark, at which point there should still be enough space in the
	   FIFO to allow our link partner time to process this frame and
	   also time to send a PAUSE frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}


static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
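
/* For illustration only (not driver code): a hedged sketch of using the
 * wrappers above, e.g. from a debug hook, to read the PHY ID registers.
 * phy_addr is a hypothetical example parameter.
 */
#if 0
static void example_read_phy_id(struct net_device *ndev, int phy_addr)
{
	/* emac_mdio_read() returns the register value or a negative errno */
	int id1 = emac_mdio_read(ndev, phy_addr, MII_PHYSID1);
	int id2 = emac_mdio_read(ndev, phy_addr, MII_PHYSID2);

	if (id1 >= 0 && id2 >= 0)
		printk(KERN_DEBUG "PHY %d id %04x:%04x\n",
		       phy_addr, id1, id2);
}
#endif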

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a hung TX (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}

static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
	       return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	eth_hw_addr_set(ndev, addr->sa_data);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
				   rx_sync_size, DMA_FROM_DEVICE)
				   + NET_IP_ALIGN;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		WRITE_ONCE(dev->ndev->mtu, new_mtu);
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		WRITE_ONCE(ndev->mtu, new_mtu);
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static int
__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
{
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			   dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
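
/* Note on the NET_IP_ALIGN arithmetic above: the skb was allocated with
 * NET_IP_ALIGN bytes reserved, so the mapping covers the buffer from its
 * true start (skb->data - NET_IP_ALIGN) while the BD pointer is advanced
 * back by the same amount.  Received frames therefore land at skb->data,
 * and the 14-byte Ethernet header leaves the IP header 4-byte aligned.
 */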

static int
emac_alloc_rx_skb(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
					  GFP_KERNEL);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static int
emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now. */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}

static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the conditional makes a significant
	 * perf difference, then we can store the TMR0 value in "dev"
	 * instead.
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}

/* Tx lock BH */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
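
/* Worked example, with an assumed (illustrative) MAL_MAX_TX_SIZE of 4096:
 * a 9000-byte segment handed to emac_xmit_split() is cut into chunks of
 * 4096 + 4096 + 808 bytes across three BDs, and only the last chunk of
 * the last segment carries MAL_TX_CTRL_LAST.
 */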

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note that this is only an *estimate*; we can still run out of
	 * empty slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks.
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimate was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}

/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}

static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			       SKB_DATA_ALIGN(len + NET_IP_ALIGN),
			       DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb;

			copy_skb = napi_alloc_skb(&dev->mal->napi, len);
			if (unlikely(!copy_skb))
				goto oom;

			memcpy(copy_skb->data - NET_IP_ALIGN,
			       skb->data - NET_IP_ALIGN,
			       len + NET_IP_ALIGN);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}

/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}

/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}

static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
1975 	struct emac_error_stats *est = &dev->estats;
1976 	struct net_device_stats *nst = &ndev->stats;
1977 	unsigned long flags;
1978 
1979 	DBG2(dev, "stats" NL);
1980 
1981 	/* Compute "legacy" statistics */
1982 	spin_lock_irqsave(&dev->lock, flags);
1983 	nst->rx_packets = (unsigned long)st->rx_packets;
1984 	nst->rx_bytes = (unsigned long)st->rx_bytes;
1985 	nst->tx_packets = (unsigned long)st->tx_packets;
1986 	nst->tx_bytes = (unsigned long)st->tx_bytes;
1987 	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1988 					  est->rx_dropped_error +
1989 					  est->rx_dropped_resize +
1990 					  est->rx_dropped_mtu);
1991 	nst->tx_dropped = (unsigned long)est->tx_dropped;
1992 
1993 	nst->rx_errors = (unsigned long)est->rx_bd_errors;
1994 	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1995 					      est->rx_fifo_overrun +
1996 					      est->rx_overrun);
1997 	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1998 					       est->rx_alignment_error);
1999 	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
2000 					     est->rx_bad_fcs);
2001 	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
2002 						est->rx_bd_short_event +
2003 						est->rx_bd_packet_too_long +
2004 						est->rx_bd_out_of_range +
2005 						est->rx_bd_in_range +
2006 						est->rx_runt_packet +
2007 						est->rx_short_event +
2008 						est->rx_packet_too_long +
2009 						est->rx_out_of_range +
2010 						est->rx_in_range);
2011 
2012 	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
2013 	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
2014 					      est->tx_underrun);
2015 	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
2016 	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
2017 					  est->tx_bd_excessive_collisions +
2018 					  est->tx_bd_late_collision +
2019 					  est->tx_bd_multple_collisions);
2020 	spin_unlock_irqrestore(&dev->lock, flags);
2021 	return nst;
2022 }
2023 
2024 static struct mal_commac_ops emac_commac_ops = {
2025 	.poll_tx = &emac_poll_tx,
2026 	.poll_rx = &emac_poll_rx,
2027 	.peek_rx = &emac_peek_rx,
2028 	.rxde = &emac_rxde,
2029 };
2030 
2031 static struct mal_commac_ops emac_commac_sg_ops = {
2032 	.poll_tx = &emac_poll_tx,
2033 	.poll_rx = &emac_poll_rx,
2034 	.peek_rx = &emac_peek_rx_sg,
2035 	.rxde = &emac_rxde,
2036 };
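
/* The two ops tables differ only in the RX peek hook: the scatter/gather
 * variant, used when the EMAC is GiGE-capable, reports pending RX work
 * only once a descriptor carrying MAL_RX_CTRL_LAST is in the ring, i.e.
 * once at least one complete (possibly multi-BD) frame can be delivered.
 */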
2037 
2038 /* Ethtool support */
2039 static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2040 					   struct ethtool_link_ksettings *cmd)
2041 {
2042 	struct emac_instance *dev = netdev_priv(ndev);
2043 	u32 supported, advertising;
2044 
2045 	supported = dev->phy.features;
2046 	cmd->base.port = PORT_MII;
2047 	cmd->base.phy_address = dev->phy.address;
2048 
2049 	mutex_lock(&dev->link_lock);
2050 	advertising = dev->phy.advertising;
2051 	cmd->base.autoneg = dev->phy.autoneg;
2052 	cmd->base.speed = dev->phy.speed;
2053 	cmd->base.duplex = dev->phy.duplex;
2054 	mutex_unlock(&dev->link_lock);
2055 
2056 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2057 						supported);
2058 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2059 						advertising);
2060 
2061 	return 0;
2062 }
2063 
2064 static int
2065 emac_ethtool_set_link_ksettings(struct net_device *ndev,
2066 				const struct ethtool_link_ksettings *cmd)
2067 {
2068 	struct emac_instance *dev = netdev_priv(ndev);
2069 	u32 f = dev->phy.features;
2070 	u32 advertising;
2071 
2072 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
2073 						cmd->link_modes.advertising);
2074 
2075 	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2076 	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
2077 
2078 	/* Basic sanity checks */
2079 	if (dev->phy.address < 0)
2080 		return -EOPNOTSUPP;
2081 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
2082 	    cmd->base.autoneg != AUTONEG_DISABLE)
2083 		return -EINVAL;
2084 	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
2085 		return -EINVAL;
2086 	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
2087 		return -EINVAL;
2088 
2089 	if (cmd->base.autoneg == AUTONEG_DISABLE) {
2090 		switch (cmd->base.speed) {
2091 		case SPEED_10:
2092 			if (cmd->base.duplex == DUPLEX_HALF &&
2093 			    !(f & SUPPORTED_10baseT_Half))
2094 				return -EINVAL;
2095 			if (cmd->base.duplex == DUPLEX_FULL &&
2096 			    !(f & SUPPORTED_10baseT_Full))
2097 				return -EINVAL;
2098 			break;
2099 		case SPEED_100:
2100 			if (cmd->base.duplex == DUPLEX_HALF &&
2101 			    !(f & SUPPORTED_100baseT_Half))
2102 				return -EINVAL;
2103 			if (cmd->base.duplex == DUPLEX_FULL &&
2104 			    !(f & SUPPORTED_100baseT_Full))
2105 				return -EINVAL;
2106 			break;
2107 		case SPEED_1000:
2108 			if (cmd->base.duplex == DUPLEX_HALF &&
2109 			    !(f & SUPPORTED_1000baseT_Half))
2110 				return -EINVAL;
2111 			if (cmd->base.duplex == DUPLEX_FULL &&
2112 			    !(f & SUPPORTED_1000baseT_Full))
2113 				return -EINVAL;
2114 			break;
2115 		default:
2116 			return -EINVAL;
2117 		}
2118 
2119 		mutex_lock(&dev->link_lock);
2120 		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2121 						cmd->base.duplex);
2122 		mutex_unlock(&dev->link_lock);
2123 
2124 	} else {
2125 		if (!(f & SUPPORTED_Autoneg))
2126 			return -EINVAL;
2127 
2128 		mutex_lock(&dev->link_lock);
2129 		dev->phy.def->ops->setup_aneg(&dev->phy,
2130 					      (advertising & f) |
2131 					      (dev->phy.advertising &
2132 					       (ADVERTISED_Pause |
2133 						ADVERTISED_Asym_Pause)));
2134 		mutex_unlock(&dev->link_lock);
2135 	}
2136 	emac_force_link_update(dev);
2137 
2138 	return 0;
2139 }
2140 
2141 static void
2142 emac_ethtool_get_ringparam(struct net_device *ndev,
2143 			   struct ethtool_ringparam *rp,
2144 			   struct kernel_ethtool_ringparam *kernel_rp,
2145 			   struct netlink_ext_ack *extack)
2146 {
2147 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2148 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2149 }
2150 
2151 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2152 					struct ethtool_pauseparam *pp)
2153 {
2154 	struct emac_instance *dev = netdev_priv(ndev);
2155 
2156 	mutex_lock(&dev->link_lock);
2157 	if ((dev->phy.features & SUPPORTED_Autoneg) &&
2158 	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2159 		pp->autoneg = 1;
2160 
2161 	if (dev->phy.duplex == DUPLEX_FULL) {
2162 		if (dev->phy.pause)
2163 			pp->rx_pause = pp->tx_pause = 1;
2164 		else if (dev->phy.asym_pause)
2165 			pp->tx_pause = 1;
2166 	}
2167 	mutex_unlock(&dev->link_lock);
2168 }
2169 
2170 static int emac_get_regs_len(struct emac_instance *dev)
2171 {
2172 	return sizeof(struct emac_ethtool_regs_subhdr) +
2173 	       sizeof(struct emac_regs);
2174 }
2175 
2176 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2177 {
2178 	struct emac_instance *dev = netdev_priv(ndev);
2179 	int size;
2180 
2181 	size = sizeof(struct emac_ethtool_regs_hdr) +
2182 		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2183 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2184 		size += zmii_get_regs_len(dev->zmii_dev);
2185 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2186 		size += rgmii_get_regs_len(dev->rgmii_dev);
2187 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2188 		size += tah_get_regs_len(dev->tah_dev);
2189 
2190 	return size;
2191 }
2192 
2193 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2194 {
2195 	struct emac_ethtool_regs_subhdr *hdr = buf;
2196 
2197 	hdr->index = dev->cell_index;
2198 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2199 		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2200 	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2201 		hdr->version = EMAC4_ETHTOOL_REGS_VER;
2202 	} else {
2203 		hdr->version = EMAC_ETHTOOL_REGS_VER;
2204 	}
2205 	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2206 	return (void *)(hdr + 1) + sizeof(struct emac_regs);
2207 }
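
/* Layout of the ethtool register blob assembled below:
 * struct emac_ethtool_regs_hdr, then the MAL registers, then the EMAC
 * subheader + registers, then ZMII/RGMII/TAH dumps, each present only
 * if the corresponding feature exists (flagged in hdr->components).
 */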
2208 
2209 static void emac_ethtool_get_regs(struct net_device *ndev,
2210 				  struct ethtool_regs *regs, void *buf)
2211 {
2212 	struct emac_instance *dev = netdev_priv(ndev);
2213 	struct emac_ethtool_regs_hdr *hdr = buf;
2214 
2215 	hdr->components = 0;
2216 	buf = hdr + 1;
2217 
2218 	buf = mal_dump_regs(dev->mal, buf);
2219 	buf = emac_dump_regs(dev, buf);
2220 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2221 		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2222 		buf = zmii_dump_regs(dev->zmii_dev, buf);
2223 	}
2224 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2225 		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2226 		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2227 	}
2228 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2229 		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2230 		buf = tah_dump_regs(dev->tah_dev, buf);
2231 	}
2232 }
2233 
2234 static int emac_ethtool_nway_reset(struct net_device *ndev)
2235 {
2236 	struct emac_instance *dev = netdev_priv(ndev);
2237 	int res = 0;
2238 
2239 	DBG(dev, "nway_reset" NL);
2240 
2241 	if (dev->phy.address < 0)
2242 		return -EOPNOTSUPP;
2243 
2244 	mutex_lock(&dev->link_lock);
2245 	if (!dev->phy.autoneg) {
2246 		res = -EINVAL;
2247 		goto out;
2248 	}
2249 
2250 	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2251  out:
2252 	mutex_unlock(&dev->link_lock);
2253 	emac_force_link_update(dev);
2254 	return res;
2255 }
2256 
2257 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2258 {
2259 	if (stringset == ETH_SS_STATS)
2260 		return EMAC_ETHTOOL_STATS_COUNT;
2261 	else
2262 		return -EINVAL;
2263 }
2264 
2265 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2266 				     u8 *buf)
2267 {
2268 	if (stringset == ETH_SS_STATS)
2269 		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2270 }
2271 
2272 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2273 					   struct ethtool_stats *estats,
2274 					   u64 *tmp_stats)
2275 {
2276 	struct emac_instance *dev = netdev_priv(ndev);
2277 
2278 	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2279 	tmp_stats += sizeof(dev->stats) / sizeof(u64);
2280 	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2281 }
2282 
2283 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2284 				     struct ethtool_drvinfo *info)
2285 {
2286 	struct emac_instance *dev = netdev_priv(ndev);
2287 
2288 	strscpy(info->driver, "ibm_emac", sizeof(info->driver));
2289 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
2290 	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
2291 		 dev->cell_index, dev->ofdev->dev.of_node);
2292 }
2293 
2294 static const struct ethtool_ops emac_ethtool_ops = {
2295 	.get_drvinfo = emac_ethtool_get_drvinfo,
2296 
2297 	.get_regs_len = emac_ethtool_get_regs_len,
2298 	.get_regs = emac_ethtool_get_regs,
2299 
2300 	.nway_reset = emac_ethtool_nway_reset,
2301 
2302 	.get_ringparam = emac_ethtool_get_ringparam,
2303 	.get_pauseparam = emac_ethtool_get_pauseparam,
2304 
2305 	.get_strings = emac_ethtool_get_strings,
2306 	.get_sset_count = emac_ethtool_get_sset_count,
2307 	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2308 
2309 	.get_link = ethtool_op_get_link,
2310 	.get_link_ksettings = emac_ethtool_get_link_ksettings,
2311 	.set_link_ksettings = emac_ethtool_set_link_ksettings,
2312 };
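
/* Purely illustrative user-space sketch (not built as part of this file):
 * how the get_drvinfo hook above surfaces through the ethtool ioctl ABI.
 * The interface name "eth0" is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&drvinfo;
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver %s, version %s\n",
		       drvinfo.driver, drvinfo.version);
	return 0;
}
#endif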
2313 
2314 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2315 {
2316 	struct emac_instance *dev = netdev_priv(ndev);
2317 	struct mii_ioctl_data *data = if_mii(rq);
2318 
2319 	DBG(dev, "ioctl %08x" NL, cmd);
2320 
2321 	if (dev->phy.address < 0)
2322 		return -EOPNOTSUPP;
2323 
2324 	switch (cmd) {
2325 	case SIOCGMIIPHY:
2326 		data->phy_id = dev->phy.address;
2327 		fallthrough;
2328 	case SIOCGMIIREG:
2329 		data->val_out = emac_mdio_read(ndev, dev->phy.address,
2330 					       data->reg_num);
2331 		return 0;
2332 
2333 	case SIOCSMIIREG:
2334 		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2335 				data->val_in);
2336 		return 0;
2337 	default:
2338 		return -EOPNOTSUPP;
2339 	}
2340 }
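
/* Likewise illustrative (not built here): the classic mii-tool style
 * sequence against the ioctl handler above, using SIOCGMIIPHY to learn
 * the PHY address and SIOCGMIIREG to read a register. The interface
 * name "eth0" is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	if (fd >= 0 && ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;	/* basic status register */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("PHY %d BMSR = 0x%04x\n",
			       mii->phy_id, mii->val_out);
	}
	return 0;
}
#endif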
2341 
2342 struct emac_depentry {
2343 	u32			phandle;
2344 	struct device_node	*node;
2345 	struct platform_device	*ofdev;
2346 	void			*drvdata;
2347 };
2348 
2349 #define	EMAC_DEP_MAL_IDX	0
2350 #define	EMAC_DEP_ZMII_IDX	1
2351 #define	EMAC_DEP_RGMII_IDX	2
2352 #define	EMAC_DEP_TAH_IDX	3
2353 #define	EMAC_DEP_MDIO_IDX	4
2354 #define	EMAC_DEP_PREV_IDX	5
2355 #define	EMAC_DEP_COUNT		6
2356 
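/* Probe-order decoupling: the EMAC node references its MAL (and optional
 * ZMII/RGMII/TAH/MDIO) companions by phandle, but their drivers may bind
 * in any order. emac_wait_deps() below registers a platform-bus notifier
 * and sleeps (bounded by EMAC_PROBE_DEP_TIMEOUT) until emac_check_deps()
 * finds drvdata on every referenced device.
 */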
2357 static int emac_check_deps(struct emac_instance *dev,
2358 			   struct emac_depentry *deps)
2359 {
2360 	int i, there = 0;
2361 	struct device_node *np;
2362 
2363 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2364 		/* no dependency on that item, all right */
2365 		if (deps[i].phandle == 0) {
2366 			there++;
2367 			continue;
2368 		}
2369 		/* special case for blist as the dependency might go away */
2370 		if (i == EMAC_DEP_PREV_IDX) {
2371 			np = *(dev->blist - 1);
2372 			if (np == NULL) {
2373 				deps[i].phandle = 0;
2374 				there++;
2375 				continue;
2376 			}
2377 			if (deps[i].node == NULL)
2378 				deps[i].node = of_node_get(np);
2379 		}
2380 		if (deps[i].node == NULL)
2381 			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2382 		if (deps[i].node == NULL)
2383 			continue;
2384 		if (deps[i].ofdev == NULL)
2385 			deps[i].ofdev = of_find_device_by_node(deps[i].node);
2386 		if (deps[i].ofdev == NULL)
2387 			continue;
2388 		if (deps[i].drvdata == NULL)
2389 			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2390 		if (deps[i].drvdata != NULL)
2391 			there++;
2392 	}
2393 	return there == EMAC_DEP_COUNT;
2394 }
2395 
2396 static void emac_put_deps(struct emac_instance *dev)
2397 {
2398 	platform_device_put(dev->mal_dev);
2399 	platform_device_put(dev->zmii_dev);
2400 	platform_device_put(dev->rgmii_dev);
2401 	platform_device_put(dev->mdio_dev);
2402 	platform_device_put(dev->tah_dev);
2403 }
2404 
2405 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2406 			      void *data)
2407 {
2408 	/* We are only interested in devices being bound to a driver */
2409 	if (action == BUS_NOTIFY_BOUND_DRIVER)
2410 		wake_up_all(&emac_probe_wait);
2411 	return 0;
2412 }
2413 
2414 static struct notifier_block emac_of_bus_notifier = {
2415 	.notifier_call = emac_of_bus_notify
2416 };
2417 
2418 static int emac_wait_deps(struct emac_instance *dev)
2419 {
2420 	struct emac_depentry deps[EMAC_DEP_COUNT];
2421 	int i, err;
2422 
2423 	memset(&deps, 0, sizeof(deps));
2424 
2425 	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2426 	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2427 	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2428 	if (dev->tah_ph)
2429 		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2430 	if (dev->mdio_ph)
2431 		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2432 	if (dev->blist && dev->blist > emac_boot_list)
2433 		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2434 	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2435 	wait_event_timeout(emac_probe_wait,
2436 			   emac_check_deps(dev, deps),
2437 			   EMAC_PROBE_DEP_TIMEOUT);
2438 	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2439 	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2440 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2441 		of_node_put(deps[i].node);
2442 		if (err)
2443 			platform_device_put(deps[i].ofdev);
2444 	}
2445 	if (err == 0) {
2446 		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2447 		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2448 		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2449 		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2450 		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2451 	}
2452 	platform_device_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2453 	return err;
2454 }
2455 
2456 static int emac_read_uint_prop(struct device_node *np, const char *name,
2457 			       u32 *val, int fatal)
2458 {
2459 	int len;
2460 	const u32 *prop = of_get_property(np, name, &len);
2461 	if (prop == NULL || len < sizeof(u32)) {
2462 		if (fatal)
2463 			printk(KERN_ERR "%pOF: missing %s property\n",
2464 			       np, name);
2465 		return -ENODEV;
2466 	}
2467 	*val = *prop;
2468 	return 0;
2469 }
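
/* Note: dereferencing the raw property above is fine on these big-endian
 * 4xx platforms; the endian-safe generic accessor is of_property_read_u32().
 * A sketch of the same semantics using it (the _generic suffix is ours,
 * not an existing symbol):
 */
#if 0
static int emac_read_uint_prop_generic(struct device_node *np,
				       const char *name, u32 *val, int fatal)
{
	if (of_property_read_u32(np, name, val)) {
		if (fatal)
			printk(KERN_ERR "%pOF: missing %s property\n",
			       np, name);
		return -ENODEV;
	}
	return 0;
}
#endif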
2470 
2471 static void emac_adjust_link(struct net_device *ndev)
2472 {
2473 	struct emac_instance *dev = netdev_priv(ndev);
2474 	struct phy_device *phy = dev->phy_dev;
2475 
2476 	dev->phy.autoneg = phy->autoneg;
2477 	dev->phy.speed = phy->speed;
2478 	dev->phy.duplex = phy->duplex;
2479 	dev->phy.pause = phy->pause;
2480 	dev->phy.asym_pause = phy->asym_pause;
2481 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
2482 						phy->advertising);
2483 }
2484 
2485 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2486 {
2487 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2488 	/* This is a workaround for powered down ports/phys.
2489 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2490 	 * This hardware disables ports as part of the handoff
2491 	 * procedure. Accessing the ports will lead to errors
2492 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2493 	 */
2494 	return ret < 0 ? 0xffff : ret;
2495 }
2496 
2497 static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2498 			      int regnum, u16 val)
2499 {
2500 	emac_mdio_write(bus->priv, addr, regnum, val);
2501 	return 0;
2502 }
2503 
2504 static int emac_mii_bus_reset(struct mii_bus *bus)
2505 {
2506 	struct emac_instance *dev = netdev_priv(bus->priv);
2507 
2508 	return emac_reset(dev);
2509 }
2510 
2511 static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2512 				    struct phy_device *phy_dev)
2513 {
2514 	phy_dev->autoneg = phy->autoneg;
2515 	phy_dev->speed = phy->speed;
2516 	phy_dev->duplex = phy->duplex;
2517 	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
2518 						phy->advertising);
2519 	return phy_start_aneg(phy_dev);
2520 }
2521 
2522 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2523 {
2524 	struct net_device *ndev = phy->dev;
2525 	struct emac_instance *dev = netdev_priv(ndev);
2526 
2527 	phy->autoneg = AUTONEG_ENABLE;
2528 	phy->advertising = advertise;
2529 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2530 }
2531 
2532 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2533 {
2534 	struct net_device *ndev = phy->dev;
2535 	struct emac_instance *dev = netdev_priv(ndev);
2536 
2537 	phy->autoneg = AUTONEG_DISABLE;
2538 	phy->speed = speed;
2539 	phy->duplex = fd;
2540 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2541 }
2542 
2543 static int emac_mdio_poll_link(struct mii_phy *phy)
2544 {
2545 	struct net_device *ndev = phy->dev;
2546 	struct emac_instance *dev = netdev_priv(ndev);
2547 	int res;
2548 
2549 	res = phy_read_status(dev->phy_dev);
2550 	if (res) {
2551 		dev_err(&dev->ofdev->dev, "link update failed (%d).\n", res);
2552 		return ethtool_op_get_link(ndev);
2553 	}
2554 
2555 	return dev->phy_dev->link;
2556 }
2557 
2558 static int emac_mdio_read_link(struct mii_phy *phy)
2559 {
2560 	struct net_device *ndev = phy->dev;
2561 	struct emac_instance *dev = netdev_priv(ndev);
2562 	struct phy_device *phy_dev = dev->phy_dev;
2563 	int res;
2564 
2565 	res = phy_read_status(phy_dev);
2566 	if (res)
2567 		return res;
2568 
2569 	phy->speed = phy_dev->speed;
2570 	phy->duplex = phy_dev->duplex;
2571 	phy->pause = phy_dev->pause;
2572 	phy->asym_pause = phy_dev->asym_pause;
2573 	return 0;
2574 }
2575 
2576 static int emac_mdio_init_phy(struct mii_phy *phy)
2577 {
2578 	struct net_device *ndev = phy->dev;
2579 	struct emac_instance *dev = netdev_priv(ndev);
2580 
2581 	phy_start(dev->phy_dev);
2582 	return phy_init_hw(dev->phy_dev);
2583 }
2584 
2585 static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2586 	.init		= emac_mdio_init_phy,
2587 	.setup_aneg	= emac_mdio_setup_aneg,
2588 	.setup_forced	= emac_mdio_setup_forced,
2589 	.poll_link	= emac_mdio_poll_link,
2590 	.read_link	= emac_mdio_read_link,
2591 };
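
/* These ops adapt the driver's legacy struct mii_phy bookkeeping to a
 * phylib phy_device when the PHY was bound through the device tree:
 * requested link parameters are mirrored into the phy_device before
 * (re)starting autonegotiation, and the resolved ones are read back
 * from it when the link is polled.
 */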
2592 
2593 static int emac_dt_mdio_probe(struct emac_instance *dev)
2594 {
2595 	struct device_node *mii_np;
2596 	int res;
2597 
2598 	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2599 	if (!mii_np) {
2600 		dev_err(&dev->ofdev->dev, "no mdio definition found.\n");
2601 		return -ENODEV;
2602 	}
2603 
2604 	if (!of_device_is_available(mii_np)) {
2605 		res = -ENODEV;
2606 		goto put_node;
2607 	}
2608 
2609 	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2610 	if (!dev->mii_bus) {
2611 		res = -ENOMEM;
2612 		goto put_node;
2613 	}
2614 
2615 	dev->mii_bus->priv = dev->ndev;
2616 	dev->mii_bus->parent = dev->ndev->dev.parent;
2617 	dev->mii_bus->name = "emac_mdio";
2618 	dev->mii_bus->read = &emac_mii_bus_read;
2619 	dev->mii_bus->write = &emac_mii_bus_write;
2620 	dev->mii_bus->reset = &emac_mii_bus_reset;
2621 	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2622 	res = of_mdiobus_register(dev->mii_bus, mii_np);
2623 	if (res) {
2624 		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)\n",
2625 			dev->mii_bus->name, res);
2626 	}
2627 
2628  put_node:
2629 	of_node_put(mii_np);
2630 	return res;
2631 }
2632 
2633 static int emac_dt_phy_connect(struct emac_instance *dev,
2634 			       struct device_node *phy_handle)
2635 {
2636 	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2637 				    GFP_KERNEL);
2638 	if (!dev->phy.def)
2639 		return -ENOMEM;
2640 
2641 	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2642 				      0, dev->phy_mode);
2643 	if (!dev->phy_dev) {
2644 		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2645 		return -ENODEV;
2646 	}
2647 
2648 	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2649 	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2650 	dev->phy.def->name = dev->phy_dev->drv->name;
2651 	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2652 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
2653 						dev->phy_dev->supported);
2654 	dev->phy.address = dev->phy_dev->mdio.addr;
2655 	dev->phy.mode = dev->phy_dev->interface;
2656 	return 0;
2657 }
2658 
2659 static int emac_dt_phy_probe(struct emac_instance *dev)
2660 {
2661 	struct device_node *np = dev->ofdev->dev.of_node;
2662 	struct device_node *phy_handle;
2663 	int res = 1;
2664 
2665 	phy_handle = of_parse_phandle(np, "phy-handle", 0);
2666 
2667 	if (phy_handle) {
2668 		res = emac_dt_mdio_probe(dev);
2669 		if (!res) {
2670 			res = emac_dt_phy_connect(dev, phy_handle);
2671 			if (res)
2672 				mdiobus_unregister(dev->mii_bus);
2673 		}
2674 	}
2675 
2676 	of_node_put(phy_handle);
2677 	return res;
2678 }
2679 
2680 static int emac_init_phy(struct emac_instance *dev)
2681 {
2682 	struct device_node *np = dev->ofdev->dev.of_node;
2683 	struct net_device *ndev = dev->ndev;
2684 	u32 phy_map, adv;
2685 	int i;
2686 
2687 	dev->phy.dev = ndev;
2688 	dev->phy.mode = dev->phy_mode;
2689 
2690 	/* PHY-less or fixed-link configuration */
2691 	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2692 	    of_phy_is_fixed_link(np)) {
2693 		emac_reset(dev);
2694 
2695 		/* PHY-less configuration. */
2696 		dev->phy.address = -1;
2697 		dev->phy.features = SUPPORTED_MII;
2698 		if (emac_phy_supports_gige(dev->phy_mode))
2699 			dev->phy.features |= SUPPORTED_1000baseT_Full;
2700 		else
2701 			dev->phy.features |= SUPPORTED_100baseT_Full;
2702 		dev->phy.pause = 1;
2703 
2704 		if (of_phy_is_fixed_link(np)) {
2705 			int res = emac_dt_mdio_probe(dev);
2706 
2707 			if (res)
2708 				return res;
2709 
2710 			res = of_phy_register_fixed_link(np);
2711 			dev->phy_dev = of_phy_find_device(np);
2712 			if (res || !dev->phy_dev) {
2713 				mdiobus_unregister(dev->mii_bus);
2714 				return res ? res : -EINVAL;
2715 			}
2716 			emac_adjust_link(dev->ndev);
2717 			put_device(&dev->phy_dev->mdio.dev);
2718 		}
2719 		return 0;
2720 	}
2721 
2722 	mutex_lock(&emac_phy_map_lock);
2723 	phy_map = dev->phy_map | busy_phy_map;
2724 
2725 	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2726 
2727 	dev->phy.mdio_read = emac_mdio_read;
2728 	dev->phy.mdio_write = emac_mdio_write;
2729 
2730 	/* Enable internal clock source */
2731 #ifdef CONFIG_PPC_DCR_NATIVE
2732 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2733 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2734 #endif
2735 	/* PHY clock workaround */
2736 	emac_rx_clk_tx(dev);
2737 
2738 	/* Enable internal clock source on 440GX */
2739 #ifdef CONFIG_PPC_DCR_NATIVE
2740 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2741 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2742 #endif
2743 	/* Configure EMAC with defaults so we can at least use MDIO.
2744 	 * This is needed mostly for 440GX.
2745 	 */
2746 	if (emac_phy_gpcs(dev->phy.mode)) {
2747 		/* XXX
2748 		 * Make GPCS PHY address equal to EMAC index.
2749 		 * We probably should take into account busy_phy_map
2750 		 * and/or phy_map here.
2751 		 *
2752 		 * Note that the busy_phy_map is currently global
2753 		 * while it should probably be per-ASIC...
2754 		 */
2755 		dev->phy.gpcs_address = dev->gpcs_address;
2756 		if (dev->phy.gpcs_address == 0xffffffff)
2757 			dev->phy.address = dev->cell_index;
2758 	}
2759 
2760 	emac_configure(dev);
2761 
2762 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2763 		int res = emac_dt_phy_probe(dev);
2764 
2765 		switch (res) {
2766 		case 1:
2767 			/* No phy-handle property configured.
2768 			 * Continue with the existing phy probe
2769 			 * and setup code.
2770 			 */
2771 			break;
2772 
2773 		case 0:
2774 			mutex_unlock(&emac_phy_map_lock);
2775 			goto init_phy;
2776 
2777 		default:
2778 			mutex_unlock(&emac_phy_map_lock);
2779 			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2780 				res);
2781 			return res;
2782 		}
2783 	}
2784 
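	/* Legacy PHY scan: phy_map is a bitmask of addresses to skip (already
	 * busy or excluded by the platform); when an explicit phy-address was
	 * provided, every other address is masked out. A quick MII_BMCR read
	 * filters out empty addresses before the full mii_phy probe.
	 */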
2785 	if (dev->phy_address != 0xffffffff)
2786 		phy_map = ~(1 << dev->phy_address);
2787 
2788 	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2789 		if (!(phy_map & 1)) {
2790 			int r;
2791 			busy_phy_map |= 1 << i;
2792 
2793 			/* Quick check if there is a PHY at the address */
2794 			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2795 			if (r == 0xffff || r < 0)
2796 				continue;
2797 			if (!emac_mii_phy_probe(&dev->phy, i))
2798 				break;
2799 		}
2800 
2801 	/* Enable external clock source */
2802 #ifdef CONFIG_PPC_DCR_NATIVE
2803 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2804 		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2805 #endif
2806 	mutex_unlock(&emac_phy_map_lock);
2807 	if (i == 0x20) {
2808 		printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
2809 		return -ENXIO;
2810 	}
2811 
2812  init_phy:
2813 	/* Init PHY */
2814 	if (dev->phy.def->ops->init)
2815 		dev->phy.def->ops->init(&dev->phy);
2816 
2817 	/* Disable any PHY features not supported by the platform */
2818 	dev->phy.def->features &= ~dev->phy_feat_exc;
2819 	dev->phy.features &= ~dev->phy_feat_exc;
2820 
2821 	/* Setup initial link parameters */
2822 	if (dev->phy.features & SUPPORTED_Autoneg) {
2823 		adv = dev->phy.features;
2824 		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2825 			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2826 		/* Restart autonegotiation */
2827 		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2828 	} else {
2829 		u32 f = dev->phy.def->features;
2830 		int speed = SPEED_10, fd = DUPLEX_HALF;
2831 
2832 		/* Select highest supported speed/duplex */
2833 		if (f & SUPPORTED_1000baseT_Full) {
2834 			speed = SPEED_1000;
2835 			fd = DUPLEX_FULL;
2836 		} else if (f & SUPPORTED_1000baseT_Half)
2837 			speed = SPEED_1000;
2838 		else if (f & SUPPORTED_100baseT_Full) {
2839 			speed = SPEED_100;
2840 			fd = DUPLEX_FULL;
2841 		} else if (f & SUPPORTED_100baseT_Half)
2842 			speed = SPEED_100;
2843 		else if (f & SUPPORTED_10baseT_Full)
2844 			fd = DUPLEX_FULL;
2845 
2846 		/* Force link parameters */
2847 		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2848 	}
2849 	return 0;
2850 }
2851 
2852 static int emac_init_config(struct emac_instance *dev)
2853 {
2854 	struct device_node *np = dev->ofdev->dev.of_node;
2855 	int err;
2856 
2857 	/* Read config from device-tree */
2858 	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2859 		return -ENXIO;
2860 	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2861 		return -ENXIO;
2862 	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2863 		return -ENXIO;
2864 	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2865 		return -ENXIO;
2866 	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2867 		dev->max_mtu = ETH_DATA_LEN;
2868 	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2869 		dev->rx_fifo_size = 2048;
2870 	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2871 		dev->tx_fifo_size = 2048;
2872 	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2873 		dev->rx_fifo_size_gige = dev->rx_fifo_size;
2874 	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2875 		dev->tx_fifo_size_gige = dev->tx_fifo_size;
2876 	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2877 		dev->phy_address = 0xffffffff;
2878 	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2879 		dev->phy_map = 0xffffffff;
2880 	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2881 		dev->gpcs_address = 0xffffffff;
2882 	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2883 		return -ENXIO;
2884 	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2885 		dev->tah_ph = 0;
2886 	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2887 		dev->tah_port = 0;
2888 	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2889 		dev->mdio_ph = 0;
2890 	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2891 		dev->zmii_ph = 0;
2892 	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2893 		dev->zmii_port = 0xffffffff;
2894 	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2895 		dev->rgmii_ph = 0;
2896 	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2897 		dev->rgmii_port = 0xffffffff;
2898 	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2899 		dev->fifo_entry_size = 16;
2900 	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2901 		dev->mal_burst_size = 256;
2902 
2903 	/* PHY mode needs some decoding */
2904 	err = of_get_phy_mode(np, &dev->phy_mode);
2905 	if (err)
2906 		dev->phy_mode = PHY_INTERFACE_MODE_NA;
2907 
2908 	/* Check EMAC version */
2909 	if (of_device_is_compatible(np, "ibm,emac4sync")) {
2910 		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2911 		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2912 		    of_device_is_compatible(np, "ibm,emac-460gt"))
2913 			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2914 		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2915 		    of_device_is_compatible(np, "ibm,emac-405exr"))
2916 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2917 		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2918 			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2919 					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2920 					  EMAC_FTR_460EX_PHY_CLK_FIX);
2921 		}
2922 	} else if (of_device_is_compatible(np, "ibm,emac4")) {
2923 		dev->features |= EMAC_FTR_EMAC4;
2924 		if (of_device_is_compatible(np, "ibm,emac-440gx"))
2925 			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2926 	} else {
2927 		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2928 		    of_device_is_compatible(np, "ibm,emac-440gr"))
2929 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2930 		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2931 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2932 			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2933 #else
2934 			printk(KERN_ERR "%pOF: flow control must be disabled!\n",
2935 					np);
2936 			return -ENXIO;
2937 #endif
2938 		}
2939 
2940 	}
2941 
2942 	/* Fixup some feature bits based on the device tree */
2943 	if (of_property_read_bool(np, "has-inverted-stacr-oc"))
2944 		dev->features |= EMAC_FTR_STACR_OC_INVERT;
2945 	if (of_property_read_bool(np, "has-new-stacr-staopc"))
2946 		dev->features |= EMAC_FTR_HAS_NEW_STACR;
2947 
2948 	/* CAB lacks the appropriate properties */
2949 	if (of_device_is_compatible(np, "ibm,emac-axon"))
2950 		dev->features |= EMAC_FTR_HAS_NEW_STACR |
2951 			EMAC_FTR_STACR_OC_INVERT;
2952 
2953 	/* Enable TAH/ZMII/RGMII features as found */
2954 	if (dev->tah_ph != 0) {
2955 #ifdef CONFIG_IBM_EMAC_TAH
2956 		dev->features |= EMAC_FTR_HAS_TAH;
2957 #else
2958 		printk(KERN_ERR "%pOF: TAH support not enabled!\n", np);
2959 		return -ENXIO;
2960 #endif
2961 	}
2962 
2963 	if (dev->zmii_ph != 0) {
2964 #ifdef CONFIG_IBM_EMAC_ZMII
2965 		dev->features |= EMAC_FTR_HAS_ZMII;
2966 #else
2967 		printk(KERN_ERR "%pOF: ZMII support not enabled!\n", np);
2968 		return -ENXIO;
2969 #endif
2970 	}
2971 
2972 	if (dev->rgmii_ph != 0) {
2973 #ifdef CONFIG_IBM_EMAC_RGMII
2974 		dev->features |= EMAC_FTR_HAS_RGMII;
2975 #else
2976 		printk(KERN_ERR "%pOF: RGMII support not enabled!\n", np);
2977 		return -ENXIO;
2978 #endif
2979 	}
2980 
2981 	/* Read MAC-address */
2982 	err = of_get_ethdev_address(np, dev->ndev);
2983 	if (err)
2984 		return dev_err_probe(&dev->ofdev->dev, err,
2985 				     "Can't get valid [local-]mac-address from OF !\n");
2986 
2987 	/* IAHT and GAHT filter parameterization */
2988 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2989 		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2990 		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2991 	} else {
2992 		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2993 		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2994 	}
2995 
2996 	/* This should never happen */
2997 	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
2998 		return -ENXIO;
2999 
3000 	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
3001 	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
3002 	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
3003 	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
3004 	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
3005 
3006 	return 0;
3007 }
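
/* For reference, a purely hypothetical device-tree stanza carrying the
 * mandatory properties parsed above (names match the code, but every
 * address and value below is an example, not taken from a real board;
 * note that the parent bus node must also provide clock-frequency):
 *
 *	EMAC0: ethernet@ef600800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		interrupts = <0x1c 4 0x1d 4>;
 *		reg = <0xef600800 0x70>;
 *		local-mac-address = [000000000000];
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		cell-index = <0>;
 *		max-frame-size = <9000>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */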
3008 
3009 static const struct net_device_ops emac_netdev_ops = {
3010 	.ndo_open		= emac_open,
3011 	.ndo_stop		= emac_close,
3012 	.ndo_get_stats		= emac_stats,
3013 	.ndo_set_rx_mode	= emac_set_multicast_list,
3014 	.ndo_eth_ioctl		= emac_ioctl,
3015 	.ndo_tx_timeout		= emac_tx_timeout,
3016 	.ndo_validate_addr	= eth_validate_addr,
3017 	.ndo_set_mac_address	= emac_set_mac_address,
3018 	.ndo_start_xmit		= emac_start_xmit,
3019 };
3020 
3021 static const struct net_device_ops emac_gige_netdev_ops = {
3022 	.ndo_open		= emac_open,
3023 	.ndo_stop		= emac_close,
3024 	.ndo_get_stats		= emac_stats,
3025 	.ndo_set_rx_mode	= emac_set_multicast_list,
3026 	.ndo_eth_ioctl		= emac_ioctl,
3027 	.ndo_tx_timeout		= emac_tx_timeout,
3028 	.ndo_validate_addr	= eth_validate_addr,
3029 	.ndo_set_mac_address	= emac_set_mac_address,
3030 	.ndo_start_xmit		= emac_start_xmit_sg,
3031 	.ndo_change_mtu		= emac_change_mtu,
3032 };
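
/* The GiGE variant differs from the basic one in two ways: transmit goes
 * through the scatter/gather-aware emac_start_xmit_sg(), and the MTU may
 * be changed at runtime via emac_change_mtu() (e.g. for jumbo frames up
 * to the device-tree max-frame-size).
 */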
3033 
3034 static int emac_probe(struct platform_device *ofdev)
3035 {
3036 	struct net_device *ndev;
3037 	struct emac_instance *dev;
3038 	struct device_node *np = ofdev->dev.of_node;
3039 	struct device_node **blist = NULL;
3040 	int err, i;
3041 
3042 	/* Skip unused/unwired EMACs.  We leave the check for an "unused"
3043 	 * property here for now, but new flat device trees should set a
3044 	 * status property to "disabled" instead.
3045 	 */
3046 	if (of_property_read_bool(np, "unused") || !of_device_is_available(np))
3047 		return -ENODEV;
3048 
3049 	/* Find ourselves in the bootlist if we are there */
3050 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3051 		if (emac_boot_list[i] == np)
3052 			blist = &emac_boot_list[i];
3053 
3054 	/* Allocate our net_device structure */
3055 	err = -ENOMEM;
3056 	ndev = alloc_etherdev(sizeof(struct emac_instance));
3057 	if (!ndev)
3058 		goto err_gone;
3059 
3060 	dev = netdev_priv(ndev);
3061 	dev->ndev = ndev;
3062 	dev->ofdev = ofdev;
3063 	dev->blist = blist;
3064 	SET_NETDEV_DEV(ndev, &ofdev->dev);
3065 
3066 	/* Initialize some embedded data structures */
3067 	mutex_init(&dev->mdio_lock);
3068 	mutex_init(&dev->link_lock);
3069 	spin_lock_init(&dev->lock);
3070 	INIT_WORK(&dev->reset_work, emac_reset_work);
3071 
3072 	/* Init various config data based on device-tree */
3073 	err = emac_init_config(dev);
3074 	if (err)
3075 		goto err_free;
3076 
3077 	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3078 	dev->emac_irq = irq_of_parse_and_map(np, 0);
3079 	dev->wol_irq = irq_of_parse_and_map(np, 1);
3080 	if (!dev->emac_irq) {
3081 		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
3082 		err = -ENODEV;
3083 		goto err_free;
3084 	}
3085 	ndev->irq = dev->emac_irq;
3086 
3087 	/* Map EMAC regs */
3088 	/* TODO: use platform_get_resource() and devm_ioremap_resource() */
3089 	dev->emacp = of_iomap(np, 0);
3090 	if (dev->emacp == NULL) {
3091 		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
3092 		err = -ENOMEM;
3093 		goto err_irq_unmap;
3094 	}
3095 
3096 	/* Wait for dependent devices */
3097 	err = emac_wait_deps(dev);
3098 	if (err) {
3099 		printk(KERN_ERR
3100 		       "%pOF: Timeout waiting for dependent devices\n", np);
3101 		/* display more info about what's missing? */
3102 		goto err_reg_unmap;
3103 	}
3104 	dev->mal = platform_get_drvdata(dev->mal_dev);
3105 	if (dev->mdio_dev != NULL)
3106 		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3107 
3108 	/* Register with MAL */
3109 	dev->commac.ops = &emac_commac_ops;
3110 	dev->commac.dev = dev;
3111 	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3112 	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3113 	err = mal_register_commac(dev->mal, &dev->commac);
3114 	if (err) {
3115 		printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
3116 		       np, dev->mal_dev->dev.of_node);
3117 		goto err_rel_deps;
3118 	}
3119 	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3120 	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3121 
3122 	/* Get pointers to BD rings */
3123 	dev->tx_desc =
3124 	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3125 	dev->rx_desc =
3126 	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3127 
3128 	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3129 	DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3130 
3131 	/* Clean rings */
3132 	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3133 	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3134 	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3135 	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3136 
3137 	/* Attach to ZMII, if needed */
3138 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3139 	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3140 		goto err_unreg_commac;
3141 
3142 	/* Attach to RGMII, if needed */
3143 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3144 	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3145 		goto err_detach_zmii;
3146 
3147 	/* Attach to TAH, if needed */
3148 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3149 	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3150 		goto err_detach_rgmii;
3151 
3152 	/* Set some link defaults before we can find out real parameters */
3153 	dev->phy.speed = SPEED_100;
3154 	dev->phy.duplex = DUPLEX_FULL;
3155 	dev->phy.autoneg = AUTONEG_DISABLE;
3156 	dev->phy.pause = dev->phy.asym_pause = 0;
3157 	dev->stop_timeout = STOP_TIMEOUT_100;
3158 	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3159 
3160 	/* Some SoCs, like the APM821xx, do not support half-duplex mode. */
3161 	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3162 		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3163 				     SUPPORTED_100baseT_Half |
3164 				     SUPPORTED_10baseT_Half);
3165 	}
3166 
3167 	/* Find PHY if any */
3168 	err = emac_init_phy(dev);
3169 	if (err != 0)
3170 		goto err_detach_tah;
3171 
3172 	if (dev->tah_dev) {
3173 		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3174 		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3175 	}
3176 	ndev->watchdog_timeo = 5 * HZ;
3177 	if (emac_phy_supports_gige(dev->phy_mode)) {
3178 		ndev->netdev_ops = &emac_gige_netdev_ops;
3179 		dev->commac.ops = &emac_commac_sg_ops;
3180 	} else
3181 		ndev->netdev_ops = &emac_netdev_ops;
3182 	ndev->ethtool_ops = &emac_ethtool_ops;
3183 
3184 	/* MTU range: 46 - 1500 or whatever is in OF */
3185 	ndev->min_mtu = EMAC_MIN_MTU;
3186 	ndev->max_mtu = dev->max_mtu;
3187 
3188 	netif_carrier_off(ndev);
3189 
3190 	err = register_netdev(ndev);
3191 	if (err) {
3192 		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
3193 		       np, err);
3194 		goto err_detach_tah;
3195 	}
3196 
3197 	/* Set our drvdata last as we don't want them visible until we are
3198 	 * fully initialized
3199 	 */
3200 	wmb();
3201 	platform_set_drvdata(ofdev, dev);
3202 
3203 	/* There's a new kid in town! Let's tell everybody */
3204 	wake_up_all(&emac_probe_wait);
3205 
3206 
3207 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
3208 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
3209 
3210 	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
3211 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3212 
3213 	if (dev->phy.address >= 0)
3214 		printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
3215 		       dev->phy.def->name, dev->phy.address);
3216 
3217 	/* Life is good */
3218 	return 0;
3219 
3220 	/* I have a bad feeling about this ... */
3221 
3222  err_detach_tah:
3223 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3224 		tah_detach(dev->tah_dev, dev->tah_port);
3225  err_detach_rgmii:
3226 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3227 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3228  err_detach_zmii:
3229 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3230 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3231  err_unreg_commac:
3232 	mal_unregister_commac(dev->mal, &dev->commac);
3233  err_rel_deps:
3234 	emac_put_deps(dev);
3235  err_reg_unmap:
3236 	iounmap(dev->emacp);
3237  err_irq_unmap:
3238 	if (dev->wol_irq)
3239 		irq_dispose_mapping(dev->wol_irq);
3240 	if (dev->emac_irq)
3241 		irq_dispose_mapping(dev->emac_irq);
3242  err_free:
3243 	free_netdev(ndev);
3244  err_gone:
3245 	/* if we were on the bootlist, remove us as we won't show up and
3246 	 * wake up all waiters to notify them in case they were waiting
3247 	 * on us
3248 	 */
3249 	if (blist) {
3250 		*blist = NULL;
3251 		wake_up_all(&emac_probe_wait);
3252 	}
3253 	return err;
3254 }
3255 
3256 static void emac_remove(struct platform_device *ofdev)
3257 {
3258 	struct emac_instance *dev = platform_get_drvdata(ofdev);
3259 
3260 	DBG(dev, "remove" NL);
3261 
3262 	unregister_netdev(dev->ndev);
3263 
3264 	cancel_work_sync(&dev->reset_work);
3265 
3266 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3267 		tah_detach(dev->tah_dev, dev->tah_port);
3268 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3269 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3270 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3271 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3272 
3273 	if (dev->phy_dev)
3274 		phy_disconnect(dev->phy_dev);
3275 
3276 	if (dev->mii_bus)
3277 		mdiobus_unregister(dev->mii_bus);
3278 
3279 	busy_phy_map &= ~(1 << dev->phy.address);
3280 	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3281 
3282 	mal_unregister_commac(dev->mal, &dev->commac);
3283 	emac_put_deps(dev);
3284 
3285 	iounmap(dev->emacp);
3286 
3287 	if (dev->wol_irq)
3288 		irq_dispose_mapping(dev->wol_irq);
3289 	if (dev->emac_irq)
3290 		irq_dispose_mapping(dev->emac_irq);
3291 
3292 	free_netdev(dev->ndev);
3293 }
3294 
3295 /* XXX Features in here should be replaced by properties... */
3296 static const struct of_device_id emac_match[] =
3297 {
3298 	{
3299 		.type		= "network",
3300 		.compatible	= "ibm,emac",
3301 	},
3302 	{
3303 		.type		= "network",
3304 		.compatible	= "ibm,emac4",
3305 	},
3306 	{
3307 		.type		= "network",
3308 		.compatible	= "ibm,emac4sync",
3309 	},
3310 	{},
3311 };
3312 MODULE_DEVICE_TABLE(of, emac_match);
3313 
3314 static struct platform_driver emac_driver = {
3315 	.driver = {
3316 		.name = "emac",
3317 		.of_match_table = emac_match,
3318 	},
3319 	.probe = emac_probe,
3320 	.remove_new = emac_remove,
3321 };
3322 
3323 static void __init emac_make_bootlist(void)
3324 {
3325 	struct device_node *np = NULL;
3326 	int j, max, i = 0;
3327 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3328 
3329 	/* Collect EMACs */
3330 	while((np = of_find_all_nodes(np)) != NULL) {
3331 		const u32 *idx;
3332 
3333 		if (of_match_node(emac_match, np) == NULL)
3334 			continue;
3335 		if (of_property_read_bool(np, "unused"))
3336 			continue;
3337 		idx = of_get_property(np, "cell-index", NULL);
3338 		if (idx == NULL)
3339 			continue;
3340 		cell_indices[i] = *idx;
3341 		emac_boot_list[i++] = of_node_get(np);
3342 		if (i >= EMAC_BOOT_LIST_SIZE) {
3343 			of_node_put(np);
3344 			break;
3345 		}
3346 	}
3347 	max = i;
3348 
3349 	/* Bubble sort them (doh, what a creative algorithm :-) */
3350 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3351 		for (j = i; j < max; j++) {
3352 			if (cell_indices[i] > cell_indices[j]) {
3353 				swap(emac_boot_list[i], emac_boot_list[j]);
3354 				swap(cell_indices[i], cell_indices[j]);
3355 			}
3356 		}
3357 }
3358 
3359 static int __init emac_init(void)
3360 {
3361 	int rc;
3362 
3363 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3364 
3365 	/* Build EMAC boot list */
3366 	emac_make_bootlist();
3367 
3368 	/* Init submodules */
3369 	rc = mal_init();
3370 	if (rc)
3371 		goto err;
3372 	rc = zmii_init();
3373 	if (rc)
3374 		goto err_mal;
3375 	rc = rgmii_init();
3376 	if (rc)
3377 		goto err_zmii;
3378 	rc = tah_init();
3379 	if (rc)
3380 		goto err_rgmii;
3381 	rc = platform_driver_register(&emac_driver);
3382 	if (rc)
3383 		goto err_tah;
3384 
3385 	return 0;
3386 
3387  err_tah:
3388 	tah_exit();
3389  err_rgmii:
3390 	rgmii_exit();
3391  err_zmii:
3392 	zmii_exit();
3393  err_mal:
3394 	mal_exit();
3395  err:
3396 	return rc;
3397 }
3398 
3399 static void __exit emac_exit(void)
3400 {
3401 	int i;
3402 
3403 	platform_driver_unregister(&emac_driver);
3404 
3405 	tah_exit();
3406 	rgmii_exit();
3407 	zmii_exit();
3408 	mal_exit();
3409 
3410 	/* Destroy EMAC boot list */
3411 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3412 		of_node_put(emac_boot_list[i]);
3413 }
3414 
3415 module_init(emac_init);
3416 module_exit(emac_exit);
3417