// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
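/* For example, with NUM_TX_BUFF at its Kconfig default of 64
 * (CONFIG_IBM_EMAC_TXB, assuming an unmodified configuration), the queue
 * is woken once fewer than 16 descriptors remain in flight; see the wake
 * check in emac_poll_tx().
 */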

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
99  */
100 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
101 
102 /* Having stable interface names is a doomed idea. However, it would be nice
103  * if we didn't have completely random interface names at boot too :-) It's
104  * just a matter of making everybody's life easier. Since we are doing
105  * threaded probing, it's a bit harder though. The base idea here is that
106  * we make up a list of all emacs in the device-tree before we register the
107  * driver. Every emac will then wait for the previous one in the list to
108  * initialize before itself. We should also keep that list ordered by
109  * cell_index.
110  * That list is only 4 entries long, meaning that additional EMACs don't
111  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
112  */
113 
114 #define EMAC_BOOT_LIST_SIZE	4
115 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
116 
117 /* How long should I wait for dependent devices ? */
118 #define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
119 
120 /* I don't want to litter system log with timeout errors
121  * when we have brain-damaged PHY.
122  */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
}

/* EMAC PHY clock workaround:
 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
 * one that allows controlling each EMAC clock individually
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
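/* These values appear to follow from one maximum-length frame time on the
 * wire: 1538 bytes (1518-byte frame + 8-byte preamble/SFD + 12-byte IFG)
 * is 12304 bits, i.e. ~1230us at 10Mb/s, ~123us at 100Mb/s and ~12.3us at
 * 1Gb/s; the jumbo value corresponds to a ~9018-byte frame plus the same
 * overhead at 1Gb/s (9038 * 8 / 1000 ~= 72.3us), each rounded up.
 */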

static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
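/* The address above, 01:80:C2:00:00:01, is the IEEE 802.3 MAC Control
 * (PAUSE) multicast address; emac_open() subscribes to it for PAUSE
 * packet support.
 */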

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_interface_mode_is_rgmii(phy_mode) ||
		phy_mode == PHY_INTERFACE_MODE_GMII ||
		phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
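/* A note on the disable handshake used above and in emac_rx_disable():
 * clearing MR0[TXE]/MR0[RXE] requests a graceful stop, and the MAC is
 * expected to set the corresponding idle status bit (MR0[TXI]/MR0[RXI])
 * once the channel has actually drained; hence the poll bounded by
 * dev->stop_timeout, which is sized to one frame time (STOP_TIMEOUT_*).
 */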

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!((r = in_be32(&p->mr0)) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	netif_trans_update(dev->ndev);	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
	bool __maybe_unused try_internal_clock = false;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* A 40x erratum suggests stopping the RX channel before
		 * reset; we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 *
	 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
	 * ethernet cable is not attached. This causes the reset to time out
	 * and the PHY detection code in emac_init_phy() is unable to
	 * communicate and detect the AR8035-A PHY. As a result, the emac
	 * driver bails out early and the user has no ethernet.
	 * In order to stay compatible with existing configurations, the
	 * driver will temporarily switch to the internal clock, after
	 * the first reset fails.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (!n && !try_internal_clock) {
			/* first attempt has timed out. */
			n = 20;
			try_internal_clock = true;
			goto do_retry;
		}

		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
					   dev->phy_map == 0xffffffff)) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof(gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
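/* A sketch of the mapping above, assuming the classic EMAC layout of four
 * 16-bit GAHT registers (64 hash slots): EMAC_XAHT_CRC_TO_SLOT() reduces
 * the 32-bit ether_crc() of the address to a 6-bit slot index, which
 * EMAC_XAHT_SLOT_TO_REG()/EMAC_XAHT_SLOT_TO_MASK() then split into a
 * register number (slot / 16) and a bit mask (1 << (slot % 16)).
 * EMAC4SYNC parts have more/wider GAHT registers, hence the per-device
 * indirection instead of hardcoded shifts.
 */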

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		r &= ~EMAC4_RMR_MJS_MASK;
		r |= EMAC4_RMR_MJS(ndev->mtu);
	}

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch (tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch (tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_TFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_RFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
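/* The threshold is encoded in units of 64 bytes, minus one: e.g. a
 * 1024-byte TX request threshold is programmed as (1024 >> 6) - 1 = 15,
 * shifted into the variant-specific TRTR field.
 */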

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us to NOT use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
	   at that point there should still be enough space in the FIFO to
	   allow our link partner time to process this frame and also time to
	   send a PAUSE frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
	 */
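	/* Worked example, assuming a 4K RX FIFO and 16-byte FIFO entries:
	 * the low-water mark is 4096 / 8 / 16 = 32 entries (512 bytes) and
	 * the high-water mark is 4096 / 4 / 16 = 64 entries (1024 bytes),
	 * matching the byte figures quoted above.
	 */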
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						      EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}


static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}

	mutex_lock(&dev->link_lock);
	__emac_set_multicast_list(dev);
	mutex_unlock(&dev->link_lock);
}

static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct sockaddr *addr = sa;
	struct emac_regs __iomem *p = dev->emacp;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&dev->link_lock);

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		ndev->dev_addr[5]);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
				   rx_sync_size, DMA_FROM_DEVICE)
				   + NET_IP_ALIGN;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static int
__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
{
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
			   dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
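/* The NET_IP_ALIGN arithmetic above keeps the IP header aligned:
 * the *_alloc_skb_ip_align() helpers reserve NET_IP_ALIGN bytes of
 * headroom, the DMA mapping starts NET_IP_ALIGN bytes before skb->data,
 * and the hardware pointer adds them back, so the frame lands at
 * skb->data with its 14-byte Ethernet header offset such that the IP
 * header falls on a 4-byte boundary.
 */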

static int
emac_alloc_rx_skb(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
					  GFP_KERNEL);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static int
emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);

	return __emac_prepare_rx_skb(skb, dev, slot);
}

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}

static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	netif_trans_update(ndev);
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}

/* Tx lock BH */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimate*; we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimate was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}

/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}

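/* emac_recycle_rx_skb() below re-arms a BD with the skb it already owns.
 * The dma_map_single() call there is never paired with an unmap: per the
 * note at the top of this file, the 4xx DMA API implementation only does
 * cache maintenance, so re-mapping the received bytes effectively
 * invalidates them from the cache before the buffer goes back to the MAC.
 */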
1670 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1671 				       int len)
1672 {
1673 	struct sk_buff *skb = dev->rx_skb[slot];
1674 
1675 	DBG2(dev, "recycle %d %d" NL, slot, len);
1676 
1677 	if (len)
1678 		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
1679 			       SKB_DATA_ALIGN(len + NET_IP_ALIGN),
1680 			       DMA_FROM_DEVICE);
1681 
1682 	dev->rx_desc[slot].data_len = 0;
1683 	wmb();
1684 	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1685 	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1686 }
1687 
1688 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1689 {
1690 	struct emac_error_stats *st = &dev->estats;
1691 
1692 	DBG(dev, "BD RX error %04x" NL, ctrl);
1693 
1694 	++st->rx_bd_errors;
1695 	if (ctrl & EMAC_RX_ST_OE)
1696 		++st->rx_bd_overrun;
1697 	if (ctrl & EMAC_RX_ST_BP)
1698 		++st->rx_bd_bad_packet;
1699 	if (ctrl & EMAC_RX_ST_RP)
1700 		++st->rx_bd_runt_packet;
1701 	if (ctrl & EMAC_RX_ST_SE)
1702 		++st->rx_bd_short_event;
1703 	if (ctrl & EMAC_RX_ST_AE)
1704 		++st->rx_bd_alignment_error;
1705 	if (ctrl & EMAC_RX_ST_BFCS)
1706 		++st->rx_bd_bad_fcs;
1707 	if (ctrl & EMAC_RX_ST_PTL)
1708 		++st->rx_bd_packet_too_long;
1709 	if (ctrl & EMAC_RX_ST_ORE)
1710 		++st->rx_bd_out_of_range;
1711 	if (ctrl & EMAC_RX_ST_IRE)
1712 		++st->rx_bd_in_range;
1713 }
1714 
1715 static inline void emac_rx_csum(struct emac_instance *dev,
1716 				struct sk_buff *skb, u16 ctrl)
1717 {
1718 #ifdef CONFIG_IBM_EMAC_TAH
1719 	if (!ctrl && dev->tah_dev) {
1720 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1721 		++dev->stats.rx_packets_csum;
1722 	}
1723 #endif
1724 }
1725 
1726 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1727 {
1728 	if (likely(dev->rx_sg_skb != NULL)) {
1729 		int len = dev->rx_desc[slot].data_len;
1730 		int tot_len = dev->rx_sg_skb->len + len;
1731 
1732 		if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
1733 			++dev->estats.rx_dropped_mtu;
1734 			dev_kfree_skb(dev->rx_sg_skb);
1735 			dev->rx_sg_skb = NULL;
1736 		} else {
1737 			memcpy(skb_tail_pointer(dev->rx_sg_skb),
1738 					 dev->rx_skb[slot]->data, len);
1739 			skb_put(dev->rx_sg_skb, len);
1740 			emac_recycle_rx_skb(dev, slot, len);
1741 			return 0;
1742 		}
1743 	}
1744 	emac_recycle_rx_skb(dev, slot, 0);
1745 	return -1;
1746 }
1747 
1748 /* NAPI poll context */
1749 static int emac_poll_rx(void *param, int budget)
1750 {
1751 	struct emac_instance *dev = param;
1752 	int slot = dev->rx_slot, received = 0;
1753 
1754 	DBG2(dev, "poll_rx(%d)" NL, budget);
1755 
1756  again:
1757 	while (budget > 0) {
1758 		int len;
1759 		struct sk_buff *skb;
1760 		u16 ctrl = dev->rx_desc[slot].ctrl;
1761 
1762 		if (ctrl & MAL_RX_CTRL_EMPTY)
1763 			break;
1764 
1765 		skb = dev->rx_skb[slot];
1766 		mb();
1767 		len = dev->rx_desc[slot].data_len;
1768 
1769 		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1770 			goto sg;
1771 
1772 		ctrl &= EMAC_BAD_RX_MASK;
1773 		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1774 			emac_parse_rx_error(dev, ctrl);
1775 			++dev->estats.rx_dropped_error;
1776 			emac_recycle_rx_skb(dev, slot, 0);
1777 			len = 0;
1778 			goto next;
1779 		}
1780 
1781 		if (len < ETH_HLEN) {
1782 			++dev->estats.rx_dropped_stack;
1783 			emac_recycle_rx_skb(dev, slot, len);
1784 			goto next;
1785 		}
1786 
1787 		if (len && len < EMAC_RX_COPY_THRESH) {
1788 			struct sk_buff *copy_skb;
1789 
1790 			copy_skb = napi_alloc_skb(&dev->mal->napi, len);
1791 			if (unlikely(!copy_skb))
1792 				goto oom;
1793 
1794 			memcpy(copy_skb->data - NET_IP_ALIGN,
1795 			       skb->data - NET_IP_ALIGN,
1796 			       len + NET_IP_ALIGN);
1797 			emac_recycle_rx_skb(dev, slot, len);
1798 			skb = copy_skb;
1799 		} else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
1800 			goto oom;
1801 
1802 		skb_put(skb, len);
1803 	push_packet:
1804 		skb->protocol = eth_type_trans(skb, dev->ndev);
1805 		emac_rx_csum(dev, skb, ctrl);
1806 
1807 		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1808 			++dev->estats.rx_dropped_stack;
1809 	next:
1810 		++dev->stats.rx_packets;
1811 	skip:
1812 		dev->stats.rx_bytes += len;
1813 		slot = (slot + 1) % NUM_RX_BUFF;
1814 		--budget;
1815 		++received;
1816 		continue;
1817 	sg:
1818 		if (ctrl & MAL_RX_CTRL_FIRST) {
1819 			BUG_ON(dev->rx_sg_skb);
1820 			if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
1821 				DBG(dev, "rx OOM %d" NL, slot);
1822 				++dev->estats.rx_dropped_oom;
1823 				emac_recycle_rx_skb(dev, slot, 0);
1824 			} else {
1825 				dev->rx_sg_skb = skb;
1826 				skb_put(skb, len);
1827 			}
1828 		} else if (!emac_rx_sg_append(dev, slot) &&
1829 			   (ctrl & MAL_RX_CTRL_LAST)) {
1830 
1831 			skb = dev->rx_sg_skb;
1832 			dev->rx_sg_skb = NULL;
1833 
1834 			ctrl &= EMAC_BAD_RX_MASK;
1835 			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1836 				emac_parse_rx_error(dev, ctrl);
1837 				++dev->estats.rx_dropped_error;
1838 				dev_kfree_skb(skb);
1839 				len = 0;
1840 			} else
1841 				goto push_packet;
1842 		}
1843 		goto skip;
1844 	oom:
1845 		DBG(dev, "rx OOM %d" NL, slot);
1846 		/* Drop the packet and recycle skb */
1847 		++dev->estats.rx_dropped_oom;
1848 		emac_recycle_rx_skb(dev, slot, 0);
1849 		goto next;
1850 	}
1851 
1852 	if (received) {
1853 		DBG2(dev, "rx %d BDs" NL, received);
1854 		dev->rx_slot = slot;
1855 	}
1856 
1857 	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1858 		mb();
1859 		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1860 			DBG2(dev, "rx restart" NL);
1861 			received = 0;
1862 			goto again;
1863 		}
1864 
1865 		if (dev->rx_sg_skb) {
1866 			DBG2(dev, "dropping partial rx packet" NL);
1867 			++dev->estats.rx_dropped_error;
1868 			dev_kfree_skb(dev->rx_sg_skb);
1869 			dev->rx_sg_skb = NULL;
1870 		}
1871 
1872 		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1873 		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1874 		emac_rx_enable(dev);
1875 		dev->rx_slot = 0;
1876 	}
1877 	return received;
1878 }
1879 
1880 /* NAPI poll context */
1881 static int emac_peek_rx(void *param)
1882 {
1883 	struct emac_instance *dev = param;
1884 
1885 	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1886 }
1887 
1888 /* NAPI poll context */
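/* In scatter-gather mode, only report pending work once a complete frame
 * (a descriptor with MAL_RX_CTRL_LAST) is available, so emac_poll_rx()
 * never stalls on a half-received packet.
 */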
1889 static int emac_peek_rx_sg(void *param)
1890 {
1891 	struct emac_instance *dev = param;
1892 
1893 	int slot = dev->rx_slot;
1894 	while (1) {
1895 		u16 ctrl = dev->rx_desc[slot].ctrl;
1896 		if (ctrl & MAL_RX_CTRL_EMPTY)
1897 			return 0;
1898 		else if (ctrl & MAL_RX_CTRL_LAST)
1899 			return 1;
1900 
1901 		slot = (slot + 1) % NUM_RX_BUFF;
1902 
1903 		/* I'm just being paranoid here :) */
1904 		if (unlikely(slot == dev->rx_slot))
1905 			return 0;
1906 	}
1907 }
1908 
1909 /* Hard IRQ */
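/* MAL reported an RX descriptor error: count it and kick off the
 * asynchronous RX disable; emac_poll_rx() restarts the channel later.
 */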
1910 static void emac_rxde(void *param)
1911 {
1912 	struct emac_instance *dev = param;
1913 
1914 	++dev->estats.rx_stopped;
1915 	emac_rx_disable_async(dev);
1916 }
1917 
1918 /* Hard IRQ */
1919 static irqreturn_t emac_irq(int irq, void *dev_instance)
1920 {
1921 	struct emac_instance *dev = dev_instance;
1922 	struct emac_regs __iomem *p = dev->emacp;
1923 	struct emac_error_stats *st = &dev->estats;
1924 	u32 isr;
1925 
1926 	spin_lock(&dev->lock);
1927 
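	/* The ISR is write-one-to-clear: writing back the value we just
	 * read acknowledges exactly the events counted below.
	 */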
1928 	isr = in_be32(&p->isr);
1929 	out_be32(&p->isr, isr);
1930 
1931 	DBG(dev, "isr = %08x" NL, isr);
1932 
1933 	if (isr & EMAC4_ISR_TXPE)
1934 		++st->tx_parity;
1935 	if (isr & EMAC4_ISR_RXPE)
1936 		++st->rx_parity;
1937 	if (isr & EMAC4_ISR_TXUE)
1938 		++st->tx_underrun;
1939 	if (isr & EMAC4_ISR_RXOE)
1940 		++st->rx_fifo_overrun;
1941 	if (isr & EMAC_ISR_OVR)
1942 		++st->rx_overrun;
1943 	if (isr & EMAC_ISR_BP)
1944 		++st->rx_bad_packet;
1945 	if (isr & EMAC_ISR_RP)
1946 		++st->rx_runt_packet;
1947 	if (isr & EMAC_ISR_SE)
1948 		++st->rx_short_event;
1949 	if (isr & EMAC_ISR_ALE)
1950 		++st->rx_alignment_error;
1951 	if (isr & EMAC_ISR_BFCS)
1952 		++st->rx_bad_fcs;
1953 	if (isr & EMAC_ISR_PTLE)
1954 		++st->rx_packet_too_long;
1955 	if (isr & EMAC_ISR_ORE)
1956 		++st->rx_out_of_range;
1957 	if (isr & EMAC_ISR_IRE)
1958 		++st->rx_in_range;
1959 	if (isr & EMAC_ISR_SQE)
1960 		++st->tx_sqe;
1961 	if (isr & EMAC_ISR_TE)
1962 		++st->tx_errors;
1963 
1964 	spin_unlock(&dev->lock);
1965 
1966 	return IRQ_HANDLED;
1967 }
1968 
1969 static struct net_device_stats *emac_stats(struct net_device *ndev)
1970 {
1971 	struct emac_instance *dev = netdev_priv(ndev);
1972 	struct emac_stats *st = &dev->stats;
1973 	struct emac_error_stats *est = &dev->estats;
1974 	struct net_device_stats *nst = &ndev->stats;
1975 	unsigned long flags;
1976 
1977 	DBG2(dev, "stats" NL);
1978 
1979 	/* Compute "legacy" statistics */
1980 	spin_lock_irqsave(&dev->lock, flags);
1981 	nst->rx_packets = (unsigned long)st->rx_packets;
1982 	nst->rx_bytes = (unsigned long)st->rx_bytes;
1983 	nst->tx_packets = (unsigned long)st->tx_packets;
1984 	nst->tx_bytes = (unsigned long)st->tx_bytes;
1985 	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1986 					  est->rx_dropped_error +
1987 					  est->rx_dropped_resize +
1988 					  est->rx_dropped_mtu);
1989 	nst->tx_dropped = (unsigned long)est->tx_dropped;
1990 
1991 	nst->rx_errors = (unsigned long)est->rx_bd_errors;
1992 	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1993 					      est->rx_fifo_overrun +
1994 					      est->rx_overrun);
1995 	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1996 					       est->rx_alignment_error);
1997 	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1998 					     est->rx_bad_fcs);
1999 	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
2000 						est->rx_bd_short_event +
2001 						est->rx_bd_packet_too_long +
2002 						est->rx_bd_out_of_range +
2003 						est->rx_bd_in_range +
2004 						est->rx_runt_packet +
2005 						est->rx_short_event +
2006 						est->rx_packet_too_long +
2007 						est->rx_out_of_range +
2008 						est->rx_in_range);
2009 
2010 	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
2011 	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
2012 					      est->tx_underrun);
2013 	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
2014 	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
2015 					  est->tx_bd_excessive_collisions +
2016 					  est->tx_bd_late_collision +
2017 					  est->tx_bd_multple_collisions);
2018 	spin_unlock_irqrestore(&dev->lock, flags);
2019 	return nst;
2020 }
2021 
2022 static struct mal_commac_ops emac_commac_ops = {
2023 	.poll_tx = &emac_poll_tx,
2024 	.poll_rx = &emac_poll_rx,
2025 	.peek_rx = &emac_peek_rx,
2026 	.rxde = &emac_rxde,
2027 };
2028 
2029 static struct mal_commac_ops emac_commac_sg_ops = {
2030 	.poll_tx = &emac_poll_tx,
2031 	.poll_rx = &emac_poll_rx,
2032 	.peek_rx = &emac_peek_rx_sg,
2033 	.rxde = &emac_rxde,
2034 };
2035 
2036 /* Ethtool support */
2037 static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2038 					   struct ethtool_link_ksettings *cmd)
2039 {
2040 	struct emac_instance *dev = netdev_priv(ndev);
2041 	u32 supported, advertising;
2042 
2043 	supported = dev->phy.features;
2044 	cmd->base.port = PORT_MII;
2045 	cmd->base.phy_address = dev->phy.address;
2046 
2047 	mutex_lock(&dev->link_lock);
2048 	advertising = dev->phy.advertising;
2049 	cmd->base.autoneg = dev->phy.autoneg;
2050 	cmd->base.speed = dev->phy.speed;
2051 	cmd->base.duplex = dev->phy.duplex;
2052 	mutex_unlock(&dev->link_lock);
2053 
2054 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2055 						supported);
2056 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2057 						advertising);
2058 
2059 	return 0;
2060 }
2061 
2062 static int
2063 emac_ethtool_set_link_ksettings(struct net_device *ndev,
2064 				const struct ethtool_link_ksettings *cmd)
2065 {
2066 	struct emac_instance *dev = netdev_priv(ndev);
2067 	u32 f = dev->phy.features;
2068 	u32 advertising;
2069 
2070 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
2071 						cmd->link_modes.advertising);
2072 
2073 	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2074 	    cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
2075 
2076 	/* Basic sanity checks */
2077 	if (dev->phy.address < 0)
2078 		return -EOPNOTSUPP;
2079 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
2080 	    cmd->base.autoneg != AUTONEG_DISABLE)
2081 		return -EINVAL;
2082 	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
2083 		return -EINVAL;
2084 	if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
2085 		return -EINVAL;
2086 
2087 	if (cmd->base.autoneg == AUTONEG_DISABLE) {
2088 		switch (cmd->base.speed) {
2089 		case SPEED_10:
2090 			if (cmd->base.duplex == DUPLEX_HALF &&
2091 			    !(f & SUPPORTED_10baseT_Half))
2092 				return -EINVAL;
2093 			if (cmd->base.duplex == DUPLEX_FULL &&
2094 			    !(f & SUPPORTED_10baseT_Full))
2095 				return -EINVAL;
2096 			break;
2097 		case SPEED_100:
2098 			if (cmd->base.duplex == DUPLEX_HALF &&
2099 			    !(f & SUPPORTED_100baseT_Half))
2100 				return -EINVAL;
2101 			if (cmd->base.duplex == DUPLEX_FULL &&
2102 			    !(f & SUPPORTED_100baseT_Full))
2103 				return -EINVAL;
2104 			break;
2105 		case SPEED_1000:
2106 			if (cmd->base.duplex == DUPLEX_HALF &&
2107 			    !(f & SUPPORTED_1000baseT_Half))
2108 				return -EINVAL;
2109 			if (cmd->base.duplex == DUPLEX_FULL &&
2110 			    !(f & SUPPORTED_1000baseT_Full))
2111 				return -EINVAL;
2112 			break;
2113 		default:
2114 			return -EINVAL;
2115 		}
2116 
2117 		mutex_lock(&dev->link_lock);
2118 		dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2119 						cmd->base.duplex);
2120 		mutex_unlock(&dev->link_lock);
2121 
2122 	} else {
2123 		if (!(f & SUPPORTED_Autoneg))
2124 			return -EINVAL;
2125 
2126 		mutex_lock(&dev->link_lock);
2127 		dev->phy.def->ops->setup_aneg(&dev->phy,
2128 					      (advertising & f) |
2129 					      (dev->phy.advertising &
2130 					       (ADVERTISED_Pause |
2131 						ADVERTISED_Asym_Pause)));
2132 		mutex_unlock(&dev->link_lock);
2133 	}
2134 	emac_force_link_update(dev);
2135 
2136 	return 0;
2137 }
2138 
2139 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2140 				       struct ethtool_ringparam *rp)
2141 {
2142 	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2143 	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2144 }
2145 
2146 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2147 					struct ethtool_pauseparam *pp)
2148 {
2149 	struct emac_instance *dev = netdev_priv(ndev);
2150 
2151 	mutex_lock(&dev->link_lock);
2152 	if ((dev->phy.features & SUPPORTED_Autoneg) &&
2153 	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2154 		pp->autoneg = 1;
2155 
2156 	if (dev->phy.duplex == DUPLEX_FULL) {
2157 		if (dev->phy.pause)
2158 			pp->rx_pause = pp->tx_pause = 1;
2159 		else if (dev->phy.asym_pause)
2160 			pp->tx_pause = 1;
2161 	}
2162 	mutex_unlock(&dev->link_lock);
2163 }
2164 
2165 static int emac_get_regs_len(struct emac_instance *dev)
2166 {
2167 	return sizeof(struct emac_ethtool_regs_subhdr) +
2168 	       sizeof(struct emac_regs);
2169 }
2170 
2171 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2172 {
2173 	struct emac_instance *dev = netdev_priv(ndev);
2174 	int size;
2175 
2176 	size = sizeof(struct emac_ethtool_regs_hdr) +
2177 		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2178 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2179 		size += zmii_get_regs_len(dev->zmii_dev);
2180 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2181 		size += rgmii_get_regs_len(dev->rgmii_dev);
2182 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2183 		size += tah_get_regs_len(dev->tah_dev);
2184 
2185 	return size;
2186 }
2187 
2188 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2189 {
2190 	struct emac_ethtool_regs_subhdr *hdr = buf;
2191 
2192 	hdr->index = dev->cell_index;
2193 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2194 		hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2195 	} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2196 		hdr->version = EMAC4_ETHTOOL_REGS_VER;
2197 	} else {
2198 		hdr->version = EMAC_ETHTOOL_REGS_VER;
2199 	}
2200 	memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2201 	return (void *)(hdr + 1) + sizeof(struct emac_regs);
2202 }
2203 
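/* Register dump layout: a header carrying a component bitmask, then the
 * MAL and EMAC register blocks, followed by a sub-block (with its own
 * subheader) for each of ZMII, RGMII and TAH that is present.
 */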
2204 static void emac_ethtool_get_regs(struct net_device *ndev,
2205 				  struct ethtool_regs *regs, void *buf)
2206 {
2207 	struct emac_instance *dev = netdev_priv(ndev);
2208 	struct emac_ethtool_regs_hdr *hdr = buf;
2209 
2210 	hdr->components = 0;
2211 	buf = hdr + 1;
2212 
2213 	buf = mal_dump_regs(dev->mal, buf);
2214 	buf = emac_dump_regs(dev, buf);
2215 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2216 		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2217 		buf = zmii_dump_regs(dev->zmii_dev, buf);
2218 	}
2219 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2220 		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2221 		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2222 	}
2223 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2224 		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2225 		buf = tah_dump_regs(dev->tah_dev, buf);
2226 	}
2227 }
2228 
2229 static int emac_ethtool_nway_reset(struct net_device *ndev)
2230 {
2231 	struct emac_instance *dev = netdev_priv(ndev);
2232 	int res = 0;
2233 
2234 	DBG(dev, "nway_reset" NL);
2235 
2236 	if (dev->phy.address < 0)
2237 		return -EOPNOTSUPP;
2238 
2239 	mutex_lock(&dev->link_lock);
2240 	if (!dev->phy.autoneg) {
2241 		res = -EINVAL;
2242 		goto out;
2243 	}
2244 
2245 	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2246  out:
2247 	mutex_unlock(&dev->link_lock);
2248 	emac_force_link_update(dev);
2249 	return res;
2250 }
2251 
2252 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2253 {
2254 	if (stringset == ETH_SS_STATS)
2255 		return EMAC_ETHTOOL_STATS_COUNT;
2256 	else
2257 		return -EINVAL;
2258 }
2259 
2260 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2261 				     u8 * buf)
2262 {
2263 	if (stringset == ETH_SS_STATS)
2264 		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2265 }
2266 
2267 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2268 					   struct ethtool_stats *estats,
2269 					   u64 * tmp_stats)
2270 {
2271 	struct emac_instance *dev = netdev_priv(ndev);
2272 
2273 	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2274 	tmp_stats += sizeof(dev->stats) / sizeof(u64);
2275 	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2276 }
2277 
2278 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2279 				     struct ethtool_drvinfo *info)
2280 {
2281 	struct emac_instance *dev = netdev_priv(ndev);
2282 
2283 	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2284 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2285 	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
2286 		 dev->cell_index, dev->ofdev->dev.of_node);
2287 }
2288 
2289 static const struct ethtool_ops emac_ethtool_ops = {
2290 	.get_drvinfo = emac_ethtool_get_drvinfo,
2291 
2292 	.get_regs_len = emac_ethtool_get_regs_len,
2293 	.get_regs = emac_ethtool_get_regs,
2294 
2295 	.nway_reset = emac_ethtool_nway_reset,
2296 
2297 	.get_ringparam = emac_ethtool_get_ringparam,
2298 	.get_pauseparam = emac_ethtool_get_pauseparam,
2299 
2300 	.get_strings = emac_ethtool_get_strings,
2301 	.get_sset_count = emac_ethtool_get_sset_count,
2302 	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2303 
2304 	.get_link = ethtool_op_get_link,
2305 	.get_link_ksettings = emac_ethtool_get_link_ksettings,
2306 	.set_link_ksettings = emac_ethtool_set_link_ksettings,
2307 };
2308 
2309 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2310 {
2311 	struct emac_instance *dev = netdev_priv(ndev);
2312 	struct mii_ioctl_data *data = if_mii(rq);
2313 
2314 	DBG(dev, "ioctl %08x" NL, cmd);
2315 
2316 	if (dev->phy.address < 0)
2317 		return -EOPNOTSUPP;
2318 
2319 	switch (cmd) {
2320 	case SIOCGMIIPHY:
2321 		data->phy_id = dev->phy.address;
2322 		/* Fall through */
2323 	case SIOCGMIIREG:
2324 		data->val_out = emac_mdio_read(ndev, dev->phy.address,
2325 					       data->reg_num);
2326 		return 0;
2327 
2328 	case SIOCSMIIREG:
2329 		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2330 				data->val_in);
2331 		return 0;
2332 	default:
2333 		return -EOPNOTSUPP;
2334 	}
2335 }
2336 
2337 struct emac_depentry {
2338 	u32			phandle;
2339 	struct device_node	*node;
2340 	struct platform_device	*ofdev;
2341 	void			*drvdata;
2342 };
2343 
2344 #define	EMAC_DEP_MAL_IDX	0
2345 #define	EMAC_DEP_ZMII_IDX	1
2346 #define	EMAC_DEP_RGMII_IDX	2
2347 #define	EMAC_DEP_TAH_IDX	3
2348 #define	EMAC_DEP_MDIO_IDX	4
2349 #define	EMAC_DEP_PREV_IDX	5
2350 #define	EMAC_DEP_COUNT		6
2351 
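/* Resolve each phandle dependency to a probed platform device. Returns
 * true only when every entry is either absent (phandle == 0) or fully
 * resolved: node, platform device and driver data all present.
 */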
2352 static int emac_check_deps(struct emac_instance *dev,
2353 			   struct emac_depentry *deps)
2354 {
2355 	int i, there = 0;
2356 	struct device_node *np;
2357 
2358 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2359 		/* no dependency on that item, so treat it as satisfied */
2360 		if (deps[i].phandle == 0) {
2361 			there++;
2362 			continue;
2363 		}
2364 		/* special case for blist as the dependency might go away */
2365 		if (i == EMAC_DEP_PREV_IDX) {
2366 			np = *(dev->blist - 1);
2367 			if (np == NULL) {
2368 				deps[i].phandle = 0;
2369 				there++;
2370 				continue;
2371 			}
2372 			if (deps[i].node == NULL)
2373 				deps[i].node = of_node_get(np);
2374 		}
2375 		if (deps[i].node == NULL)
2376 			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2377 		if (deps[i].node == NULL)
2378 			continue;
2379 		if (deps[i].ofdev == NULL)
2380 			deps[i].ofdev = of_find_device_by_node(deps[i].node);
2381 		if (deps[i].ofdev == NULL)
2382 			continue;
2383 		if (deps[i].drvdata == NULL)
2384 			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2385 		if (deps[i].drvdata != NULL)
2386 			there++;
2387 	}
2388 	return there == EMAC_DEP_COUNT;
2389 }
2390 
2391 static void emac_put_deps(struct emac_instance *dev)
2392 {
2393 	of_dev_put(dev->mal_dev);
2394 	of_dev_put(dev->zmii_dev);
2395 	of_dev_put(dev->rgmii_dev);
2396 	of_dev_put(dev->mdio_dev);
2397 	of_dev_put(dev->tah_dev);
2398 }
2399 
2400 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2401 			      void *data)
2402 {
2403 	/* We are only interested in drivers binding to devices */
2404 	if (action == BUS_NOTIFY_BOUND_DRIVER)
2405 		wake_up_all(&emac_probe_wait);
2406 	return 0;
2407 }
2408 
2409 static struct notifier_block emac_of_bus_notifier = {
2410 	.notifier_call = emac_of_bus_notify
2411 };
2412 
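/* Wait (with timeout) for every device we depend on to be probed. A bus
 * notifier wakes us each time a driver binds, and emac_check_deps()
 * re-evaluates the dependency table on every wakeup.
 */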
2413 static int emac_wait_deps(struct emac_instance *dev)
2414 {
2415 	struct emac_depentry deps[EMAC_DEP_COUNT];
2416 	int i, err;
2417 
2418 	memset(&deps, 0, sizeof(deps));
2419 
2420 	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2421 	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2422 	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2423 	if (dev->tah_ph)
2424 		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2425 	if (dev->mdio_ph)
2426 		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2427 	if (dev->blist && dev->blist > emac_boot_list)
2428 		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2429 	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2430 	wait_event_timeout(emac_probe_wait,
2431 			   emac_check_deps(dev, deps),
2432 			   EMAC_PROBE_DEP_TIMEOUT);
2433 	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2434 	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2435 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2436 		of_node_put(deps[i].node);
2437 		if (err)
2438 			of_dev_put(deps[i].ofdev);
2439 	}
2440 	if (err == 0) {
2441 		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2442 		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2443 		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2444 		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2445 		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2446 	}
2447 	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2448 	return err;
2449 }
2450 
2451 static int emac_read_uint_prop(struct device_node *np, const char *name,
2452 			       u32 *val, int fatal)
2453 {
2454 	int len;
2455 	const u32 *prop = of_get_property(np, name, &len);
2456 	if (prop == NULL || len < sizeof(u32)) {
2457 		if (fatal)
2458 			printk(KERN_ERR "%pOF: missing %s property\n",
2459 			       np, name);
2460 		return -ENODEV;
2461 	}
2462 	*val = *prop;
2463 	return 0;
2464 }
2465 
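/* phylib link-change callback: mirror the phy_device state into the
 * driver's own mii_phy structure so the legacy code paths see it.
 */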
2466 static void emac_adjust_link(struct net_device *ndev)
2467 {
2468 	struct emac_instance *dev = netdev_priv(ndev);
2469 	struct phy_device *phy = dev->phy_dev;
2470 
2471 	dev->phy.autoneg = phy->autoneg;
2472 	dev->phy.speed = phy->speed;
2473 	dev->phy.duplex = phy->duplex;
2474 	dev->phy.pause = phy->pause;
2475 	dev->phy.asym_pause = phy->asym_pause;
2476 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
2477 						phy->advertising);
2478 }
2479 
2480 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2481 {
2482 	int ret = emac_mdio_read(bus->priv, addr, regnum);
2483 	/* This is a workaround for powered down ports/phys.
2484 	 * In the wild, this was seen on the Cisco Meraki MX60(W).
2485 	 * This hardware disables ports as part of the handoff
2486 	 * procedure. Accessing the ports will lead to errors
2487 	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2488 	 */
2489 	return ret < 0 ? 0xffff : ret;
2490 }
2491 
2492 static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2493 			      int regnum, u16 val)
2494 {
2495 	emac_mdio_write(bus->priv, addr, regnum, val);
2496 	return 0;
2497 }
2498 
2499 static int emac_mii_bus_reset(struct mii_bus *bus)
2500 {
2501 	struct emac_instance *dev = netdev_priv(bus->priv);
2502 
2503 	return emac_reset(dev);
2504 }
2505 
2506 static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2507 				    struct phy_device *phy_dev)
2508 {
2509 	phy_dev->autoneg = phy->autoneg;
2510 	phy_dev->speed = phy->speed;
2511 	phy_dev->duplex = phy->duplex;
2512 	ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
2513 						phy->advertising);
2514 	return phy_start_aneg(phy_dev);
2515 }
2516 
2517 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2518 {
2519 	struct net_device *ndev = phy->dev;
2520 	struct emac_instance *dev = netdev_priv(ndev);
2521 
2522 	phy->autoneg = AUTONEG_ENABLE;
2523 	phy->advertising = advertise;
2524 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2525 }
2526 
2527 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2528 {
2529 	struct net_device *ndev = phy->dev;
2530 	struct emac_instance *dev = netdev_priv(ndev);
2531 
2532 	phy->autoneg = AUTONEG_DISABLE;
2533 	phy->speed = speed;
2534 	phy->duplex = fd;
2535 	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2536 }
2537 
2538 static int emac_mdio_poll_link(struct mii_phy *phy)
2539 {
2540 	struct net_device *ndev = phy->dev;
2541 	struct emac_instance *dev = netdev_priv(ndev);
2542 	int res;
2543 
2544 	res = phy_read_status(dev->phy_dev);
2545 	if (res) {
2546 		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2547 		return ethtool_op_get_link(ndev);
2548 	}
2549 
2550 	return dev->phy_dev->link;
2551 }
2552 
2553 static int emac_mdio_read_link(struct mii_phy *phy)
2554 {
2555 	struct net_device *ndev = phy->dev;
2556 	struct emac_instance *dev = netdev_priv(ndev);
2557 	struct phy_device *phy_dev = dev->phy_dev;
2558 	int res;
2559 
2560 	res = phy_read_status(phy_dev);
2561 	if (res)
2562 		return res;
2563 
2564 	phy->speed = phy_dev->speed;
2565 	phy->duplex = phy_dev->duplex;
2566 	phy->pause = phy_dev->pause;
2567 	phy->asym_pause = phy_dev->asym_pause;
2568 	return 0;
2569 }
2570 
2571 static int emac_mdio_init_phy(struct mii_phy *phy)
2572 {
2573 	struct net_device *ndev = phy->dev;
2574 	struct emac_instance *dev = netdev_priv(ndev);
2575 
2576 	phy_start(dev->phy_dev);
2577 	return phy_init_hw(dev->phy_dev);
2578 }
2579 
2580 static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2581 	.init		= emac_mdio_init_phy,
2582 	.setup_aneg	= emac_mdio_setup_aneg,
2583 	.setup_forced	= emac_mdio_setup_forced,
2584 	.poll_link	= emac_mdio_poll_link,
2585 	.read_link	= emac_mdio_read_link,
2586 };
2587 
2588 static int emac_dt_mdio_probe(struct emac_instance *dev)
2589 {
2590 	struct device_node *mii_np;
2591 	int res;
2592 
2593 	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2594 	if (!mii_np) {
2595 		dev_err(&dev->ofdev->dev, "no mdio definition found.");
2596 		return -ENODEV;
2597 	}
2598 
2599 	if (!of_device_is_available(mii_np)) {
2600 		res = -ENODEV;
2601 		goto put_node;
2602 	}
2603 
2604 	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2605 	if (!dev->mii_bus) {
2606 		res = -ENOMEM;
2607 		goto put_node;
2608 	}
2609 
2610 	dev->mii_bus->priv = dev->ndev;
2611 	dev->mii_bus->parent = dev->ndev->dev.parent;
2612 	dev->mii_bus->name = "emac_mdio";
2613 	dev->mii_bus->read = &emac_mii_bus_read;
2614 	dev->mii_bus->write = &emac_mii_bus_write;
2615 	dev->mii_bus->reset = &emac_mii_bus_reset;
2616 	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2617 	res = of_mdiobus_register(dev->mii_bus, mii_np);
2618 	if (res) {
2619 		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
2620 			dev->mii_bus->name, res);
2621 	}
2622 
2623  put_node:
2624 	of_node_put(mii_np);
2625 	return res;
2626 }
2627 
2628 static int emac_dt_phy_connect(struct emac_instance *dev,
2629 			       struct device_node *phy_handle)
2630 {
2631 	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2632 				    GFP_KERNEL);
2633 	if (!dev->phy.def)
2634 		return -ENOMEM;
2635 
2636 	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2637 				      0, dev->phy_mode);
2638 	if (!dev->phy_dev) {
2639 		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2640 		return -ENODEV;
2641 	}
2642 
2643 	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2644 	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2645 	dev->phy.def->name = dev->phy_dev->drv->name;
2646 	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2647 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
2648 						dev->phy_dev->supported);
2649 	dev->phy.address = dev->phy_dev->mdio.addr;
2650 	dev->phy.mode = dev->phy_dev->interface;
2651 	return 0;
2652 }
2653 
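/* Returns 0 if a device-tree described PHY was connected, 1 if the node
 * has no "phy-handle" property (caller falls back to the legacy PHY
 * probe), or a negative errno on failure.
 */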
2654 static int emac_dt_phy_probe(struct emac_instance *dev)
2655 {
2656 	struct device_node *np = dev->ofdev->dev.of_node;
2657 	struct device_node *phy_handle;
2658 	int res = 1;
2659 
2660 	phy_handle = of_parse_phandle(np, "phy-handle", 0);
2661 
2662 	if (phy_handle) {
2663 		res = emac_dt_mdio_probe(dev);
2664 		if (!res) {
2665 			res = emac_dt_phy_connect(dev, phy_handle);
2666 			if (res)
2667 				mdiobus_unregister(dev->mii_bus);
2668 		}
2669 	}
2670 
2671 	of_node_put(phy_handle);
2672 	return res;
2673 }
2674 
2675 static int emac_init_phy(struct emac_instance *dev)
2676 {
2677 	struct device_node *np = dev->ofdev->dev.of_node;
2678 	struct net_device *ndev = dev->ndev;
2679 	u32 phy_map, adv;
2680 	int i;
2681 
2682 	dev->phy.dev = ndev;
2683 	dev->phy.mode = dev->phy_mode;
2684 
2685 	/* PHY-less or fixed-link configuration. */
2686 	if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2687 	    of_phy_is_fixed_link(np)) {
2688 		emac_reset(dev);
2689 
2691 		dev->phy.address = -1;
2692 		dev->phy.features = SUPPORTED_MII;
2693 		if (emac_phy_supports_gige(dev->phy_mode))
2694 			dev->phy.features |= SUPPORTED_1000baseT_Full;
2695 		else
2696 			dev->phy.features |= SUPPORTED_100baseT_Full;
2697 		dev->phy.pause = 1;
2698 
2699 		if (of_phy_is_fixed_link(np)) {
2700 			int res = emac_dt_mdio_probe(dev);
2701 
2702 			if (res)
2703 				return res;
2704 
2705 			res = of_phy_register_fixed_link(np);
2706 			dev->phy_dev = of_phy_find_device(np);
2707 			if (res || !dev->phy_dev) {
2708 				mdiobus_unregister(dev->mii_bus);
2709 				return res ? res : -EINVAL;
2710 			}
2711 			emac_adjust_link(dev->ndev);
2712 			put_device(&dev->phy_dev->mdio.dev);
2713 		}
2714 		return 0;
2715 	}
2716 
2717 	mutex_lock(&emac_phy_map_lock);
2718 	phy_map = dev->phy_map | busy_phy_map;
2719 
2720 	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2721 
2722 	dev->phy.mdio_read = emac_mdio_read;
2723 	dev->phy.mdio_write = emac_mdio_write;
2724 
2725 	/* Enable internal clock source */
2726 #ifdef CONFIG_PPC_DCR_NATIVE
2727 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2728 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2729 #endif
2730 	/* PHY clock workaround */
2731 	emac_rx_clk_tx(dev);
2732 
2733 	/* Enable internal clock source on 440GX */
2734 #ifdef CONFIG_PPC_DCR_NATIVE
2735 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2736 		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2737 #endif
2738 	/* Configure EMAC with defaults so we can at least use MDIO.
2739 	 * This is needed mostly for 440GX.
2740 	 */
2741 	if (emac_phy_gpcs(dev->phy.mode)) {
2742 		/* XXX
2743 		 * Make GPCS PHY address equal to EMAC index.
2744 		 * We probably should take into account busy_phy_map
2745 		 * and/or phy_map here.
2746 		 *
2747 		 * Note that the busy_phy_map is currently global
2748 		 * while it should probably be per-ASIC...
2749 		 */
2750 		dev->phy.gpcs_address = dev->gpcs_address;
2751 		if (dev->phy.gpcs_address == 0xffffffff)
2752 			dev->phy.address = dev->cell_index;
2753 	}
2754 
2755 	emac_configure(dev);
2756 
2757 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2758 		int res = emac_dt_phy_probe(dev);
2759 
2760 		switch (res) {
2761 		case 1:
2762 			/* No phy-handle property configured.
2763 			 * Continue with the existing phy probe
2764 			 * and setup code.
2765 			 */
2766 			break;
2767 
2768 		case 0:
2769 			mutex_unlock(&emac_phy_map_lock);
2770 			goto init_phy;
2771 
2772 		default:
2773 			mutex_unlock(&emac_phy_map_lock);
2774 			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2775 				res);
2776 			return res;
2777 		}
2778 	}
2779 
2780 	if (dev->phy_address != 0xffffffff)
2781 		phy_map = ~(1 << dev->phy_address);
2782 
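	/* Scan the 32 possible MDIO addresses, skipping any excluded by
	 * phy_map, claim each candidate in busy_phy_map, and probe the
	 * first address that answers an MII_BMCR read.
	 */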
2783 	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2784 		if (!(phy_map & 1)) {
2785 			int r;
2786 			busy_phy_map |= 1 << i;
2787 
2788 			/* Quick check if there is a PHY at the address */
2789 			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2790 			if (r == 0xffff || r < 0)
2791 				continue;
2792 			if (!emac_mii_phy_probe(&dev->phy, i))
2793 				break;
2794 		}
2795 
2796 	/* Enable external clock source */
2797 #ifdef CONFIG_PPC_DCR_NATIVE
2798 	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2799 		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2800 #endif
2801 	mutex_unlock(&emac_phy_map_lock);
2802 	if (i == 0x20) {
2803 		printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
2804 		return -ENXIO;
2805 	}
2806 
2807  init_phy:
2808 	/* Init PHY */
2809 	if (dev->phy.def->ops->init)
2810 		dev->phy.def->ops->init(&dev->phy);
2811 
2812 	/* Disable any PHY features not supported by the platform */
2813 	dev->phy.def->features &= ~dev->phy_feat_exc;
2814 	dev->phy.features &= ~dev->phy_feat_exc;
2815 
2816 	/* Setup initial link parameters */
2817 	if (dev->phy.features & SUPPORTED_Autoneg) {
2818 		adv = dev->phy.features;
2819 		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2820 			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2821 		/* Restart autonegotiation */
2822 		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2823 	} else {
2824 		u32 f = dev->phy.def->features;
2825 		int speed = SPEED_10, fd = DUPLEX_HALF;
2826 
2827 		/* Select highest supported speed/duplex */
2828 		if (f & SUPPORTED_1000baseT_Full) {
2829 			speed = SPEED_1000;
2830 			fd = DUPLEX_FULL;
2831 		} else if (f & SUPPORTED_1000baseT_Half)
2832 			speed = SPEED_1000;
2833 		else if (f & SUPPORTED_100baseT_Full) {
2834 			speed = SPEED_100;
2835 			fd = DUPLEX_FULL;
2836 		} else if (f & SUPPORTED_100baseT_Half)
2837 			speed = SPEED_100;
2838 		else if (f & SUPPORTED_10baseT_Full)
2839 			fd = DUPLEX_FULL;
2840 
2841 		/* Force link parameters */
2842 		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2843 	}
2844 	return 0;
2845 }
2846 
2847 static int emac_init_config(struct emac_instance *dev)
2848 {
2849 	struct device_node *np = dev->ofdev->dev.of_node;
2850 	const void *p;
2851 	int err;
2852 
2853 	/* Read config from device-tree */
2854 	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2855 		return -ENXIO;
2856 	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2857 		return -ENXIO;
2858 	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2859 		return -ENXIO;
2860 	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2861 		return -ENXIO;
2862 	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2863 		dev->max_mtu = ETH_DATA_LEN;
2864 	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2865 		dev->rx_fifo_size = 2048;
2866 	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2867 		dev->tx_fifo_size = 2048;
2868 	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2869 		dev->rx_fifo_size_gige = dev->rx_fifo_size;
2870 	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2871 		dev->tx_fifo_size_gige = dev->tx_fifo_size;
2872 	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2873 		dev->phy_address = 0xffffffff;
2874 	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2875 		dev->phy_map = 0xffffffff;
2876 	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2877 		dev->gpcs_address = 0xffffffff;
2878 	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2879 		return -ENXIO;
2880 	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2881 		dev->tah_ph = 0;
2882 	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2883 		dev->tah_port = 0;
2884 	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2885 		dev->mdio_ph = 0;
2886 	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2887 		dev->zmii_ph = 0;
2888 	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2889 		dev->zmii_port = 0xffffffff;
2890 	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2891 		dev->rgmii_ph = 0;
2892 	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2893 		dev->rgmii_port = 0xffffffff;
2894 	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2895 		dev->fifo_entry_size = 16;
2896 	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2897 		dev->mal_burst_size = 256;
2898 
2899 	/* PHY mode needs some decoding */
2900 	err = of_get_phy_mode(np, &dev->phy_mode);
2901 	if (err)
2902 		dev->phy_mode = PHY_INTERFACE_MODE_NA;
2903 
2904 	/* Check EMAC version */
2905 	if (of_device_is_compatible(np, "ibm,emac4sync")) {
2906 		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2907 		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2908 		    of_device_is_compatible(np, "ibm,emac-460gt"))
2909 			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2910 		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2911 		    of_device_is_compatible(np, "ibm,emac-405exr"))
2912 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2913 		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2914 			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2915 					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2916 					  EMAC_FTR_460EX_PHY_CLK_FIX);
2917 		}
2918 	} else if (of_device_is_compatible(np, "ibm,emac4")) {
2919 		dev->features |= EMAC_FTR_EMAC4;
2920 		if (of_device_is_compatible(np, "ibm,emac-440gx"))
2921 			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2922 	} else {
2923 		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2924 		    of_device_is_compatible(np, "ibm,emac-440gr"))
2925 			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2926 		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2927 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2928 			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2929 #else
2930 			printk(KERN_ERR "%pOF: Flow control not disabled!\n",
2931 					np);
2932 			return -ENXIO;
2933 #endif
2934 		}
2936 	}
2937 
2938 	/* Fixup some feature bits based on the device tree */
2939 	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2940 		dev->features |= EMAC_FTR_STACR_OC_INVERT;
2941 	if (of_get_property(np, "has-new-stacr-staopc", NULL))
2942 		dev->features |= EMAC_FTR_HAS_NEW_STACR;
2943 
2944 	/* CAB lacks the appropriate properties */
2945 	if (of_device_is_compatible(np, "ibm,emac-axon"))
2946 		dev->features |= EMAC_FTR_HAS_NEW_STACR |
2947 			EMAC_FTR_STACR_OC_INVERT;
2948 
2949 	/* Enable TAH/ZMII/RGMII features as found */
2950 	if (dev->tah_ph != 0) {
2951 #ifdef CONFIG_IBM_EMAC_TAH
2952 		dev->features |= EMAC_FTR_HAS_TAH;
2953 #else
2954 		printk(KERN_ERR "%pOF: TAH support not enabled!\n", np);
2955 		return -ENXIO;
2956 #endif
2957 	}
2958 
2959 	if (dev->zmii_ph != 0) {
2960 #ifdef CONFIG_IBM_EMAC_ZMII
2961 		dev->features |= EMAC_FTR_HAS_ZMII;
2962 #else
2963 		printk(KERN_ERR "%pOF: ZMII support not enabled!\n", np);
2964 		return -ENXIO;
2965 #endif
2966 	}
2967 
2968 	if (dev->rgmii_ph != 0) {
2969 #ifdef CONFIG_IBM_EMAC_RGMII
2970 		dev->features |= EMAC_FTR_HAS_RGMII;
2971 #else
2972 		printk(KERN_ERR "%pOF: RGMII support not enabled!\n", np);
2973 		return -ENXIO;
2974 #endif
2975 	}
2976 
2977 	/* Read MAC-address */
2978 	p = of_get_property(np, "local-mac-address", NULL);
2979 	if (p == NULL) {
2980 		printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
2981 		       np);
2982 		return -ENXIO;
2983 	}
2984 	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2985 
2986 	/* IAHT and GAHT filter parameterization */
2987 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2988 		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2989 		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2990 	} else {
2991 		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2992 		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2993 	}
2994 
2995 	/* This should never happen */
2996 	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
2997 		return -ENXIO;
2998 
2999 	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
3000 	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
3001 	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
3002 	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
3003 	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
3004 
3005 	return 0;
3006 }
3007 
3008 static const struct net_device_ops emac_netdev_ops = {
3009 	.ndo_open		= emac_open,
3010 	.ndo_stop		= emac_close,
3011 	.ndo_get_stats		= emac_stats,
3012 	.ndo_set_rx_mode	= emac_set_multicast_list,
3013 	.ndo_do_ioctl		= emac_ioctl,
3014 	.ndo_tx_timeout		= emac_tx_timeout,
3015 	.ndo_validate_addr	= eth_validate_addr,
3016 	.ndo_set_mac_address	= emac_set_mac_address,
3017 	.ndo_start_xmit		= emac_start_xmit,
3018 };
3019 
3020 static const struct net_device_ops emac_gige_netdev_ops = {
3021 	.ndo_open		= emac_open,
3022 	.ndo_stop		= emac_close,
3023 	.ndo_get_stats		= emac_stats,
3024 	.ndo_set_rx_mode	= emac_set_multicast_list,
3025 	.ndo_do_ioctl		= emac_ioctl,
3026 	.ndo_tx_timeout		= emac_tx_timeout,
3027 	.ndo_validate_addr	= eth_validate_addr,
3028 	.ndo_set_mac_address	= emac_set_mac_address,
3029 	.ndo_start_xmit		= emac_start_xmit_sg,
3030 	.ndo_change_mtu		= emac_change_mtu,
3031 };
3032 
3033 static int emac_probe(struct platform_device *ofdev)
3034 {
3035 	struct net_device *ndev;
3036 	struct emac_instance *dev;
3037 	struct device_node *np = ofdev->dev.of_node;
3038 	struct device_node **blist = NULL;
3039 	int err, i;
3040 
3041 	/* Skip unused/unwired EMACs.  We leave the check for an unused
3042 	 * property here for now, but new flat device trees should set a
3043 	 * status property to "disabled" instead.
3044 	 */
3045 	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3046 		return -ENODEV;
3047 
3048 	/* Find ourselves in the bootlist if we are there */
3049 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3050 		if (emac_boot_list[i] == np)
3051 			blist = &emac_boot_list[i];
3052 
3053 	/* Allocate our net_device structure */
3054 	err = -ENOMEM;
3055 	ndev = alloc_etherdev(sizeof(struct emac_instance));
3056 	if (!ndev)
3057 		goto err_gone;
3058 
3059 	dev = netdev_priv(ndev);
3060 	dev->ndev = ndev;
3061 	dev->ofdev = ofdev;
3062 	dev->blist = blist;
3063 	SET_NETDEV_DEV(ndev, &ofdev->dev);
3064 
3065 	/* Initialize some embedded data structures */
3066 	mutex_init(&dev->mdio_lock);
3067 	mutex_init(&dev->link_lock);
3068 	spin_lock_init(&dev->lock);
3069 	INIT_WORK(&dev->reset_work, emac_reset_work);
3070 
3071 	/* Init various config data based on device-tree */
3072 	err = emac_init_config(dev);
3073 	if (err)
3074 		goto err_free;
3075 
3076 	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3077 	dev->emac_irq = irq_of_parse_and_map(np, 0);
3078 	dev->wol_irq = irq_of_parse_and_map(np, 1);
3079 	if (!dev->emac_irq) {
3080 		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
3081 		err = -ENODEV;
3082 		goto err_free;
3083 	}
3084 	ndev->irq = dev->emac_irq;
3085 
3086 	/* Map EMAC regs */
3087 	/* TODO: convert to platform_get_resource() and devm_ioremap_resource() */
3088 	dev->emacp = of_iomap(np, 0);
3089 	if (dev->emacp == NULL) {
3090 		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
3091 		err = -ENOMEM;
3092 		goto err_irq_unmap;
3093 	}
3094 
3095 	/* Wait for dependent devices */
3096 	err = emac_wait_deps(dev);
3097 	if (err) {
3098 		printk(KERN_ERR
3099 		       "%pOF: Timeout waiting for dependent devices\n", np);
3100 		/* XXX: display more info about what's missing? */
3101 		goto err_reg_unmap;
3102 	}
3103 	dev->mal = platform_get_drvdata(dev->mal_dev);
3104 	if (dev->mdio_dev != NULL)
3105 		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3106 
3107 	/* Register with MAL */
3108 	dev->commac.ops = &emac_commac_ops;
3109 	dev->commac.dev = dev;
3110 	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3111 	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3112 	err = mal_register_commac(dev->mal, &dev->commac);
3113 	if (err) {
3114 		printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
3115 		       np, dev->mal_dev->dev.of_node);
3116 		goto err_rel_deps;
3117 	}
3118 	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3119 	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3120 
3121 	/* Get pointers to BD rings */
3122 	dev->tx_desc =
3123 	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3124 	dev->rx_desc =
3125 	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3126 
3127 	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3128 	DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3129 
3130 	/* Clean rings */
3131 	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3132 	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3133 	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3134 	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3135 
3136 	/* Attach to ZMII, if needed */
3137 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3138 	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3139 		goto err_unreg_commac;
3140 
3141 	/* Attach to RGMII, if needed */
3142 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3143 	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3144 		goto err_detach_zmii;
3145 
3146 	/* Attach to TAH, if needed */
3147 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3148 	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3149 		goto err_detach_rgmii;
3150 
3151 	/* Set some link defaults before we can find out real parameters */
3152 	dev->phy.speed = SPEED_100;
3153 	dev->phy.duplex = DUPLEX_FULL;
3154 	dev->phy.autoneg = AUTONEG_DISABLE;
3155 	dev->phy.pause = dev->phy.asym_pause = 0;
3156 	dev->stop_timeout = STOP_TIMEOUT_100;
3157 	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3158 
3159 	/* Some SoCs, such as the APM821xx, do not support half-duplex mode. */
3160 	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3161 		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3162 				     SUPPORTED_100baseT_Half |
3163 				     SUPPORTED_10baseT_Half);
3164 	}
3165 
3166 	/* Find PHY if any */
3167 	err = emac_init_phy(dev);
3168 	if (err != 0)
3169 		goto err_detach_tah;
3170 
3171 	if (dev->tah_dev) {
3172 		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3173 		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3174 	}
3175 	ndev->watchdog_timeo = 5 * HZ;
3176 	if (emac_phy_supports_gige(dev->phy_mode)) {
3177 		ndev->netdev_ops = &emac_gige_netdev_ops;
3178 		dev->commac.ops = &emac_commac_sg_ops;
3179 	} else
3180 		ndev->netdev_ops = &emac_netdev_ops;
3181 	ndev->ethtool_ops = &emac_ethtool_ops;
3182 
3183 	/* MTU range: 46 - 1500 or whatever is in OF */
3184 	ndev->min_mtu = EMAC_MIN_MTU;
3185 	ndev->max_mtu = dev->max_mtu;
3186 
3187 	netif_carrier_off(ndev);
3188 
3189 	err = register_netdev(ndev);
3190 	if (err) {
3191 		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
3192 		       np, err);
3193 		goto err_detach_tah;
3194 	}
3195 
3196 	/* Set our drvdata last as we don't want them visible until we are
3197 	 * fully initialized
3198 	 */
3199 	wmb();
3200 	platform_set_drvdata(ofdev, dev);
3201 
3202 	/* There's a new kid in town! Let's tell everybody. */
3203 	wake_up_all(&emac_probe_wait);
3204 
3206 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
3207 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
3208 
3209 	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
3210 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3211 
3212 	if (dev->phy.address >= 0)
3213 		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
3214 		       dev->phy.def->name, dev->phy.address);
3215 
3216 	/* Life is good */
3217 	return 0;
3218 
3219 	/* I have a bad feeling about this ... */
3220 
3221  err_detach_tah:
3222 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3223 		tah_detach(dev->tah_dev, dev->tah_port);
3224  err_detach_rgmii:
3225 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3226 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3227  err_detach_zmii:
3228 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3229 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3230  err_unreg_commac:
3231 	mal_unregister_commac(dev->mal, &dev->commac);
3232  err_rel_deps:
3233 	emac_put_deps(dev);
3234  err_reg_unmap:
3235 	iounmap(dev->emacp);
3236  err_irq_unmap:
3237 	if (dev->wol_irq)
3238 		irq_dispose_mapping(dev->wol_irq);
3239 	if (dev->emac_irq)
3240 		irq_dispose_mapping(dev->emac_irq);
3241  err_free:
3242 	free_netdev(ndev);
3243  err_gone:
3244 	/* if we were on the bootlist, remove us as we won't show up and
3245 	 * wake up all waiters to notify them in case they were waiting
3246 	 * on us
3247 	 */
3248 	if (blist) {
3249 		*blist = NULL;
3250 		wake_up_all(&emac_probe_wait);
3251 	}
3252 	return err;
3253 }
3254 
3255 static int emac_remove(struct platform_device *ofdev)
3256 {
3257 	struct emac_instance *dev = platform_get_drvdata(ofdev);
3258 
3259 	DBG(dev, "remove" NL);
3260 
3261 	unregister_netdev(dev->ndev);
3262 
3263 	cancel_work_sync(&dev->reset_work);
3264 
3265 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3266 		tah_detach(dev->tah_dev, dev->tah_port);
3267 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3268 		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3269 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3270 		zmii_detach(dev->zmii_dev, dev->zmii_port);
3271 
3272 	if (dev->phy_dev)
3273 		phy_disconnect(dev->phy_dev);
3274 
3275 	if (dev->mii_bus)
3276 		mdiobus_unregister(dev->mii_bus);
3277 
3278 	busy_phy_map &= ~(1 << dev->phy.address);
3279 	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3280 
3281 	mal_unregister_commac(dev->mal, &dev->commac);
3282 	emac_put_deps(dev);
3283 
3284 	iounmap(dev->emacp);
3285 
3286 	if (dev->wol_irq)
3287 		irq_dispose_mapping(dev->wol_irq);
3288 	if (dev->emac_irq)
3289 		irq_dispose_mapping(dev->emac_irq);
3290 
3291 	free_netdev(dev->ndev);
3292 
3293 	return 0;
3294 }
3295 
3296 /* XXX Features in here should be replaced by properties... */
3297 static const struct of_device_id emac_match[] =
3298 {
3299 	{
3300 		.type		= "network",
3301 		.compatible	= "ibm,emac",
3302 	},
3303 	{
3304 		.type		= "network",
3305 		.compatible	= "ibm,emac4",
3306 	},
3307 	{
3308 		.type		= "network",
3309 		.compatible	= "ibm,emac4sync",
3310 	},
3311 	{},
3312 };
3313 MODULE_DEVICE_TABLE(of, emac_match);
3314 
3315 static struct platform_driver emac_driver = {
3316 	.driver = {
3317 		.name = "emac",
3318 		.of_match_table = emac_match,
3319 	},
3320 	.probe = emac_probe,
3321 	.remove = emac_remove,
3322 };
3323 
3324 static void __init emac_make_bootlist(void)
3325 {
3326 	struct device_node *np = NULL;
3327 	int j, max, i = 0;
3328 	int cell_indices[EMAC_BOOT_LIST_SIZE];
3329 
3330 	/* Collect EMACs */
3331 	while ((np = of_find_all_nodes(np)) != NULL) {
3332 		const u32 *idx;
3333 
3334 		if (of_match_node(emac_match, np) == NULL)
3335 			continue;
3336 		if (of_get_property(np, "unused", NULL))
3337 			continue;
3338 		idx = of_get_property(np, "cell-index", NULL);
3339 		if (idx == NULL)
3340 			continue;
3341 		cell_indices[i] = *idx;
3342 		emac_boot_list[i++] = of_node_get(np);
3343 		if (i >= EMAC_BOOT_LIST_SIZE) {
3344 			of_node_put(np);
3345 			break;
3346 		}
3347 	}
3348 	max = i;
3349 
3350 	/* Sort them by cell index (a simple exchange sort; the list is tiny) */
3351 	for (i = 0; max > 1 && (i < (max - 1)); i++)
3352 		for (j = i; j < max; j++) {
3353 			if (cell_indices[i] > cell_indices[j]) {
3354 				swap(emac_boot_list[i], emac_boot_list[j]);
3355 				swap(cell_indices[i], cell_indices[j]);
3356 			}
3357 		}
3358 }
3359 
3360 static int __init emac_init(void)
3361 {
3362 	int rc;
3363 
3364 	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3365 
3366 	/* Build EMAC boot list */
3367 	emac_make_bootlist();
3368 
3369 	/* Init submodules */
3370 	rc = mal_init();
3371 	if (rc)
3372 		goto err;
3373 	rc = zmii_init();
3374 	if (rc)
3375 		goto err_mal;
3376 	rc = rgmii_init();
3377 	if (rc)
3378 		goto err_zmii;
3379 	rc = tah_init();
3380 	if (rc)
3381 		goto err_rgmii;
3382 	rc = platform_driver_register(&emac_driver);
3383 	if (rc)
3384 		goto err_tah;
3385 
3386 	return 0;
3387 
3388  err_tah:
3389 	tah_exit();
3390  err_rgmii:
3391 	rgmii_exit();
3392  err_zmii:
3393 	zmii_exit();
3394  err_mal:
3395 	mal_exit();
3396  err:
3397 	return rc;
3398 }
3399 
3400 static void __exit emac_exit(void)
3401 {
3402 	int i;
3403 
3404 	platform_driver_unregister(&emac_driver);
3405 
3406 	tah_exit();
3407 	rgmii_exit();
3408 	zmii_exit();
3409 	mal_exit();
3410 
3411 	/* Destroy EMAC boot list */
3412 	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3413 		of_node_put(emac_boot_list[i]);
3414 }
3415 
3416 module_init(emac_init);
3417 module_exit(emac_exit);
3418