1 /*
2  *
3  * Alchemy Au1x00 ethernet driver
4  *
5  * Copyright 2001-2003, 2006 MontaVista Software Inc.
6  * Copyright 2002 TimeSys Corp.
7  * Added ethtool/mii-tool support,
8  * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9  * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10  * or riemer@riemer-nt.de: fixed the link beat detection with
11  * ioctls (SIOCGMIIPHY)
12  * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
13  *  converted to use linux-2.6.x's PHY framework
14  *
15  * Author: MontaVista Software, Inc.
16  *		ppopov@mvista.com or source@mvista.com
17  *
18  * ########################################################################
19  *
20  *  This program is free software; you can distribute it and/or modify it
21  *  under the terms of the GNU General Public License (Version 2) as
22  *  published by the Free Software Foundation.
23  *
24  *  This program is distributed in the hope it will be useful, but WITHOUT
25  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
27  *  for more details.
28  *
29  *  You should have received a copy of the GNU General Public License along
30  *  with this program; if not, write to the Free Software Foundation, Inc.,
31  *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
32  *
33  * ########################################################################
34  *
35  *
36  */
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 
39 #include <linux/capability.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/string.h>
44 #include <linux/timer.h>
45 #include <linux/errno.h>
46 #include <linux/in.h>
47 #include <linux/ioport.h>
48 #include <linux/bitops.h>
49 #include <linux/slab.h>
50 #include <linux/interrupt.h>
51 #include <linux/init.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/ethtool.h>
55 #include <linux/mii.h>
56 #include <linux/skbuff.h>
57 #include <linux/delay.h>
58 #include <linux/crc32.h>
59 #include <linux/phy.h>
60 #include <linux/platform_device.h>
61 #include <linux/cpu.h>
62 #include <linux/io.h>
63 
64 #include <asm/mipsregs.h>
65 #include <asm/irq.h>
66 #include <asm/processor.h>
67 
68 #include <au1000.h>
69 #include <au1xxx_eth.h>
70 #include <prom.h>
71 
72 #include "au1000_eth.h"
73 
74 #ifdef AU1000_ETH_DEBUG
75 static int au1000_debug = 5;
76 #else
77 static int au1000_debug = 3;
78 #endif
79 
80 #define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV	| \
81 				NETIF_MSG_PROBE	| \
82 				NETIF_MSG_LINK)
83 
84 #define DRV_NAME	"au1000_eth"
85 #define DRV_VERSION	"1.7"
86 #define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
87 #define DRV_DESC	"Au1xxx on-chip Ethernet driver"
88 
89 MODULE_AUTHOR(DRV_AUTHOR);
90 MODULE_DESCRIPTION(DRV_DESC);
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_VERSION);
93 
94 /*
95  * Theory of operation
96  *
97  * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
98  * There are four receive and four transmit descriptors.  These
99  * descriptors are not in memory; rather, they are just a set of
100  * hardware registers.
101  *
102  * Since the Au1000 has a coherent data cache, the receive and
103  * transmit buffers are allocated from the KSEG0 segment. The
104  * hardware registers, however, are still mapped at KSEG1 to
105  * make sure there are no out-of-order writes, and that all writes
106  * complete immediately.
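 *
 * Since both rings have only four entries, the rx/tx head and tail
 * indices in this driver are always advanced with "& (NUM_RX_DMA - 1)"
 * or "& (NUM_TX_DMA - 1)" so they wrap in place (see au1000_rx(),
 * au1000_tx() and au1000_tx_ack() below).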
107  */
108 
109 /*
110  * board-specific configurations
111  *
112  * PHY detection algorithm
113  *
114  * If phy_static_config is not set, the PHY setup is
115  * autodetected:
116  *
117  * mii_probe() first searches the current MAC's MII bus for a PHY,
118  * selecting the first (or last, if phy_search_highest_addr is
119  * set) PHY address not already claimed by another netdev.
120  *
121  * If nothing is found that way when searching for the 2nd ethernet
122  * controller's PHY and phy1_search_mac0 is set, then
123  * the first MII bus is searched as well for an unclaimed PHY; this is
124  * needed in the case of a dual PHY accessible only through MAC0's MII
125  * bus.
126  *
127  * Finally, if no PHY is found, then the corresponding ethernet
128  * controller is not registered to the network subsystem.
129  */
130 
131 /* autodetection defaults: phy1_search_mac0 */
132 
133 /* static PHY setup
134  *
135  * most boards' PHY setup should be detected properly by the
136  * autodetection algorithm in mii_probe(), but in some cases (e.g. if
137  * you have a switch attached, or want to use the PHY's interrupt
138  * notification capabilities) you can provide a static PHY
139  * configuration here.
140  *
141  * IRQs may only be set if a PHY address was configured.
142  * If a PHY address is given, a bus id must also be set.
143  *
144  * P.S.: make sure the IRQs used are configured properly in the
145  * board-specific irq map.
146  */
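
/* Illustrative sketch of such a static setup, passed to the platform
 * device as struct au1000_eth_platform_data (the concrete values below
 * are board-specific assumptions, not requirements of this driver):
 *
 *	static struct au1000_eth_platform_data board_mac0_pdata = {
 *		.mac			 = { 0x00, 0x50, 0xc2, 0x00, 0x00, 0x01 },
 *		.phy_static_config	 = 1,
 *		.phy_search_highest_addr = 0,
 *		.phy1_search_mac0	 = 0,
 *		.phy_addr		 = 1,	(PHY at MII address 1)
 *		.phy_busid		 = 0,	(on MAC0's own MII bus)
 *		.phy_irq		 = 0,	(0 keeps PHY_POLL; use a board
 *						 IRQ if the PHY interrupt line
 *						 is wired up)
 *	};
 *
 * Such a structure is normally attached by the board code to the
 * au1000-eth platform device via its dev.platform_data pointer.
 */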
147 
148 static void au1000_enable_mac(struct net_device *dev, int force_reset)
149 {
150 	unsigned long flags;
151 	struct au1000_private *aup = netdev_priv(dev);
152 
153 	spin_lock_irqsave(&aup->lock, flags);
154 
155 	if (force_reset || (!aup->mac_enabled)) {
156 		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
157 		au_sync_delay(2);
158 		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
159 				| MAC_EN_CLOCK_ENABLE), aup->enable);
160 		au_sync_delay(2);
161 
162 		aup->mac_enabled = 1;
163 	}
164 
165 	spin_unlock_irqrestore(&aup->lock, flags);
166 }
167 
168 /*
169  * MII operations
170  */
171 static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
172 {
173 	struct au1000_private *aup = netdev_priv(dev);
174 	u32 *const mii_control_reg = &aup->mac->mii_control;
175 	u32 *const mii_data_reg = &aup->mac->mii_data;
176 	u32 timedout = 20;
177 	u32 mii_control;
178 
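
	/* wait for any in-progress MII transaction to finish */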
179 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
180 		mdelay(1);
181 		if (--timedout == 0) {
182 			netdev_err(dev, "read_MII busy timeout!!\n");
183 			return -1;
184 		}
185 	}
186 
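	/* issue the read: select the PHY address and register, wait for the
	 * transaction to complete, then return the contents of the data
	 * register
	 */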
187 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
188 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
189 
190 	writel(mii_control, mii_control_reg);
191 
192 	timedout = 20;
193 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
194 		mdelay(1);
195 		if (--timedout == 0) {
196 			netdev_err(dev, "mdio_read busy timeout!!\n");
197 			return -1;
198 		}
199 	}
200 	return readl(mii_data_reg);
201 }
202 
203 static void au1000_mdio_write(struct net_device *dev, int phy_addr,
204 			      int reg, u16 value)
205 {
206 	struct au1000_private *aup = netdev_priv(dev);
207 	u32 *const mii_control_reg = &aup->mac->mii_control;
208 	u32 *const mii_data_reg = &aup->mac->mii_data;
209 	u32 timedout = 20;
210 	u32 mii_control;
211 
212 	while (readl(mii_control_reg) & MAC_MII_BUSY) {
213 		mdelay(1);
214 		if (--timedout == 0) {
215 			netdev_err(dev, "mdio_write busy timeout!!\n");
216 			return;
217 		}
218 	}
219 
220 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
221 		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
222 
223 	writel(value, mii_data_reg);
224 	writel(mii_control, mii_control_reg);
225 }
226 
227 static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
228 {
229 	/* WARNING: bus->phy_map[phy_addr]->attached_dev == dev does
230 	 * _NOT_ hold (e.g. when the PHY is accessed through the other MAC's MII bus)
231 	 */
232 	struct net_device *const dev = bus->priv;
233 
234 	/* make sure the MAC associated with this
235 	 * mii_bus is enabled
236 	 */
237 	au1000_enable_mac(dev, 0);
238 
239 	return au1000_mdio_read(dev, phy_addr, regnum);
240 }
241 
242 static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
243 				u16 value)
244 {
245 	struct net_device *const dev = bus->priv;
246 
247 	/* make sure the MAC associated with this
248 	 * mii_bus is enabled
249 	 */
250 	au1000_enable_mac(dev, 0);
251 
252 	au1000_mdio_write(dev, phy_addr, regnum, value);
253 	return 0;
254 }
255 
256 static int au1000_mdiobus_reset(struct mii_bus *bus)
257 {
258 	struct net_device *const dev = bus->priv;
259 
260 	/* make sure the MAC associated with this
261 	 * mii_bus is enabled
262 	 */
263 	au1000_enable_mac(dev, 0);
264 
265 	return 0;
266 }
267 
268 static void au1000_hard_stop(struct net_device *dev)
269 {
270 	struct au1000_private *aup = netdev_priv(dev);
271 	u32 reg;
272 
273 	netif_dbg(aup, drv, dev, "hard stop\n");
274 
275 	reg = readl(&aup->mac->control);
276 	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
277 	writel(reg, &aup->mac->control);
278 	au_sync_delay(10);
279 }
280 
281 static void au1000_enable_rx_tx(struct net_device *dev)
282 {
283 	struct au1000_private *aup = netdev_priv(dev);
284 	u32 reg;
285 
286 	netif_dbg(aup, hw, dev, "enable_rx_tx\n");
287 
288 	reg = readl(&aup->mac->control);
289 	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
290 	writel(reg, &aup->mac->control);
291 	au_sync_delay(10);
292 }
293 
294 static void
295 au1000_adjust_link(struct net_device *dev)
296 {
297 	struct au1000_private *aup = netdev_priv(dev);
298 	struct phy_device *phydev = aup->phy_dev;
299 	unsigned long flags;
300 	u32 reg;
301 
302 	int status_change = 0;
303 
304 	BUG_ON(!aup->phy_dev);
305 
306 	spin_lock_irqsave(&aup->lock, flags);
307 
308 	if (phydev->link && (aup->old_speed != phydev->speed)) {
309 		/* speed changed */
310 
311 		switch (phydev->speed) {
312 		case SPEED_10:
313 		case SPEED_100:
314 			break;
315 		default:
316 			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
317 							phydev->speed);
318 			break;
319 		}
320 
321 		aup->old_speed = phydev->speed;
322 
323 		status_change = 1;
324 	}
325 
326 	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
327 		/* duplex mode changed */
328 
329 		/* switching duplex mode requires to disable rx and tx! */
330 		au1000_hard_stop(dev);
331 
332 		reg = readl(&aup->mac->control);
333 		if (DUPLEX_FULL == phydev->duplex) {
334 			reg |= MAC_FULL_DUPLEX;
335 			reg &= ~MAC_DISABLE_RX_OWN;
336 		} else {
337 			reg &= ~MAC_FULL_DUPLEX;
338 			reg |= MAC_DISABLE_RX_OWN;
339 		}
340 		writel(reg, &aup->mac->control);
341 		au_sync_delay(1);
342 
343 		au1000_enable_rx_tx(dev);
344 		aup->old_duplex = phydev->duplex;
345 
346 		status_change = 1;
347 	}
348 
349 	if (phydev->link != aup->old_link) {
350 		/* link state changed */
351 
352 		if (!phydev->link) {
353 			/* link went down */
354 			aup->old_speed = 0;
355 			aup->old_duplex = -1;
356 		}
357 
358 		aup->old_link = phydev->link;
359 		status_change = 1;
360 	}
361 
362 	spin_unlock_irqrestore(&aup->lock, flags);
363 
364 	if (status_change) {
365 		if (phydev->link)
366 			netdev_info(dev, "link up (%d/%s)\n",
367 			       phydev->speed,
368 			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
369 		else
370 			netdev_info(dev, "link down\n");
371 	}
372 }
373 
374 static int au1000_mii_probe(struct net_device *dev)
375 {
376 	struct au1000_private *const aup = netdev_priv(dev);
377 	struct phy_device *phydev = NULL;
378 	int phy_addr;
379 
380 	if (aup->phy_static_config) {
381 		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
382 
383 		if (aup->phy_addr)
384 			phydev = aup->mii_bus->phy_map[aup->phy_addr];
385 		else
386 			netdev_info(dev, "using PHY-less setup\n");
387 		return 0;
388 	}
389 
390 	/* find the first (lowest address) PHY
391 	 * on the current MAC's MII bus
392 	 */
393 	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
394 		if (aup->mii_bus->phy_map[phy_addr]) {
395 			phydev = aup->mii_bus->phy_map[phy_addr];
396 			if (!aup->phy_search_highest_addr)
397 				/* break out with first one found */
398 				break;
399 		}
400 
401 	if (aup->phy1_search_mac0) {
402 		/* try harder to find a PHY */
403 		if (!phydev && (aup->mac_id == 1)) {
404 			/* no PHY found, maybe we have a dual PHY? */
405 			dev_info(&dev->dev, "no PHY found on MAC1, "
406 				"let's see if it's attached to MAC0...\n");
407 
408 			/* find the first (lowest address) non-attached
409 			 * PHY on the MAC0 MII bus
410 			 */
411 			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
412 				struct phy_device *const tmp_phydev =
413 					aup->mii_bus->phy_map[phy_addr];
414 
415 				if (aup->mac_id == 1)
416 					break;
417 
418 				/* no PHY here... */
419 				if (!tmp_phydev)
420 					continue;
421 
422 				/* already claimed by MAC0 */
423 				if (tmp_phydev->attached_dev)
424 					continue;
425 
426 				phydev = tmp_phydev;
427 				break; /* found it */
428 			}
429 		}
430 	}
431 
432 	if (!phydev) {
433 		netdev_err(dev, "no PHY found\n");
434 		return -1;
435 	}
436 
437 	/* now we are supposed to have a proper phydev, to attach to... */
438 	BUG_ON(phydev->attached_dev);
439 
440 	phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
441 			0, PHY_INTERFACE_MODE_MII);
442 
443 	if (IS_ERR(phydev)) {
444 		netdev_err(dev, "Could not attach to PHY\n");
445 		return PTR_ERR(phydev);
446 	}
447 
448 	/* mask with MAC supported features */
449 	phydev->supported &= (SUPPORTED_10baseT_Half
450 			      | SUPPORTED_10baseT_Full
451 			      | SUPPORTED_100baseT_Half
452 			      | SUPPORTED_100baseT_Full
453 			      | SUPPORTED_Autoneg
454 			      /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
455 			      | SUPPORTED_MII
456 			      | SUPPORTED_TP);
457 
458 	phydev->advertising = phydev->supported;
459 
460 	aup->old_link = 0;
461 	aup->old_speed = 0;
462 	aup->old_duplex = -1;
463 	aup->phy_dev = phydev;
464 
465 	netdev_info(dev, "attached PHY driver [%s] "
466 	       "(mii_bus:phy_addr=%s, irq=%d)\n",
467 	       phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
468 
469 	return 0;
470 }
471 
472 
473 /*
474  * Buffer allocation/deallocation routines. The buffer descriptor returned
475  * has the virtual and dma address of a buffer suitable for
476  * both, receive and transmit operations.
477  */
478 static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
479 {
480 	struct db_dest *pDB;
481 	pDB = aup->pDBfree;
482 
483 	if (pDB)
484 		aup->pDBfree = pDB->pnext;
485 
486 	return pDB;
487 }
488 
489 void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
490 {
491 	struct db_dest *pDBfree = aup->pDBfree;
492 	if (pDBfree)
493 		pDBfree->pnext = pDB;
494 	aup->pDBfree = pDB;
495 }
496 
497 static void au1000_reset_mac_unlocked(struct net_device *dev)
498 {
499 	struct au1000_private *const aup = netdev_priv(dev);
500 	int i;
501 
502 	au1000_hard_stop(dev);
503 
504 	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
505 	au_sync_delay(2);
506 	writel(0, aup->enable);
507 	au_sync_delay(2);
508 
509 	aup->tx_full = 0;
510 	for (i = 0; i < NUM_RX_DMA; i++) {
511 		/* reset control bits */
512 		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
513 	}
514 	for (i = 0; i < NUM_TX_DMA; i++) {
515 		/* reset control bits */
516 		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
517 	}
518 
519 	aup->mac_enabled = 0;
520 
521 }
522 
523 static void au1000_reset_mac(struct net_device *dev)
524 {
525 	struct au1000_private *const aup = netdev_priv(dev);
526 	unsigned long flags;
527 
528 	netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
529 					(unsigned)aup);
530 
531 	spin_lock_irqsave(&aup->lock, flags);
532 
533 	au1000_reset_mac_unlocked(dev);
534 
535 	spin_unlock_irqrestore(&aup->lock, flags);
536 }
537 
538 /*
539  * Set up the receive and transmit "rings".  These pointers are the addresses
540  * of the rx and tx MAC DMA registers so they are fixed by the hardware --
541  * these are not descriptors sitting in memory.
542  */
543 static void
544 au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
545 {
546 	int i;
547 
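	/* the rx descriptor registers live at offset 0x100 within the MACDMA
	 * register block; the tx descriptor registers start at offset 0
	 */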
548 	for (i = 0; i < NUM_RX_DMA; i++) {
549 		aup->rx_dma_ring[i] = (struct rx_dma *)
550 			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
551 	}
552 	for (i = 0; i < NUM_TX_DMA; i++) {
553 		aup->tx_dma_ring[i] = (struct tx_dma *)
554 			(tx_base + sizeof(struct tx_dma) * i);
555 	}
556 }
557 
558 /*
559  * ethtool operations
560  */
561 
562 static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
563 {
564 	struct au1000_private *aup = netdev_priv(dev);
565 
566 	if (aup->phy_dev)
567 		return phy_ethtool_gset(aup->phy_dev, cmd);
568 
569 	return -EINVAL;
570 }
571 
572 static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
573 {
574 	struct au1000_private *aup = netdev_priv(dev);
575 
576 	if (!capable(CAP_NET_ADMIN))
577 		return -EPERM;
578 
579 	if (aup->phy_dev)
580 		return phy_ethtool_sset(aup->phy_dev, cmd);
581 
582 	return -EINVAL;
583 }
584 
585 static void
586 au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
587 {
588 	struct au1000_private *aup = netdev_priv(dev);
589 
590 	strcpy(info->driver, DRV_NAME);
591 	strcpy(info->version, DRV_VERSION);
592 	info->fw_version[0] = '\0';
593 	sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
594 	info->regdump_len = 0;
595 }
596 
597 static void au1000_set_msglevel(struct net_device *dev, u32 value)
598 {
599 	struct au1000_private *aup = netdev_priv(dev);
600 	aup->msg_enable = value;
601 }
602 
603 static u32 au1000_get_msglevel(struct net_device *dev)
604 {
605 	struct au1000_private *aup = netdev_priv(dev);
606 	return aup->msg_enable;
607 }
608 
609 static const struct ethtool_ops au1000_ethtool_ops = {
610 	.get_settings = au1000_get_settings,
611 	.set_settings = au1000_set_settings,
612 	.get_drvinfo = au1000_get_drvinfo,
613 	.get_link = ethtool_op_get_link,
614 	.get_msglevel = au1000_get_msglevel,
615 	.set_msglevel = au1000_set_msglevel,
616 };
617 
618 
619 /*
620  * Initialize the interface.
621  *
622  * When the device powers up, the clocks are disabled and the
623  * mac is in reset state.  When the interface is closed, we
624  * do the same -- reset the device and disable the clocks to
625  * conserve power. Thus, whenever au1000_init() is called,
626  * the device should already be in reset state.
627  */
628 static int au1000_init(struct net_device *dev)
629 {
630 	struct au1000_private *aup = netdev_priv(dev);
631 	unsigned long flags;
632 	int i;
633 	u32 control;
634 
635 	netif_dbg(aup, hw, dev, "au1000_init\n");
636 
637 	/* bring the device out of reset */
638 	au1000_enable_mac(dev, 1);
639 
640 	spin_lock_irqsave(&aup->lock, flags);
641 
642 	writel(0, &aup->mac->control);
643 	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
644 	aup->tx_tail = aup->tx_head;
645 	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
646 
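	/* program the station address: bytes 4-5 go into mac_addr_high and
	 * bytes 0-3 into mac_addr_low, with the lowest-numbered byte in the
	 * least significant position of each register
	 */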
647 	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
648 					&aup->mac->mac_addr_high);
649 	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
650 		dev->dev_addr[1]<<8 | dev->dev_addr[0],
651 					&aup->mac->mac_addr_low);
652 
653 
654 	for (i = 0; i < NUM_RX_DMA; i++)
655 		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
656 
657 	au_sync();
658 
659 	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
660 #ifndef CONFIG_CPU_LITTLE_ENDIAN
661 	control |= MAC_BIG_ENDIAN;
662 #endif
663 	if (aup->phy_dev) {
664 		if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
665 			control |= MAC_FULL_DUPLEX;
666 		else
667 			control |= MAC_DISABLE_RX_OWN;
668 	} else { /* PHY-less op, assume full-duplex */
669 		control |= MAC_FULL_DUPLEX;
670 	}
671 
672 	writel(control, &aup->mac->control);
673 	writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
674 	au_sync();
675 
676 	spin_unlock_irqrestore(&aup->lock, flags);
677 	return 0;
678 }
679 
680 static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
681 {
682 	struct net_device_stats *ps = &dev->stats;
683 
684 	ps->rx_packets++;
685 	if (status & RX_MCAST_FRAME)
686 		ps->multicast++;
687 
688 	if (status & RX_ERROR) {
689 		ps->rx_errors++;
690 		if (status & RX_MISSED_FRAME)
691 			ps->rx_missed_errors++;
692 		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
693 			ps->rx_length_errors++;
694 		if (status & RX_CRC_ERROR)
695 			ps->rx_crc_errors++;
696 		if (status & RX_COLL)
697 			ps->collisions++;
698 	} else
699 		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
700 
701 }
702 
703 /*
704  * Au1000 receive routine.
705  */
706 static int au1000_rx(struct net_device *dev)
707 {
708 	struct au1000_private *aup = netdev_priv(dev);
709 	struct sk_buff *skb;
710 	struct rx_dma *prxd;
711 	u32 buff_stat, status;
712 	struct db_dest *pDB;
713 	u32	frmlen;
714 
715 	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
716 
717 	prxd = aup->rx_dma_ring[aup->rx_head];
718 	buff_stat = prxd->buff_stat;
719 	while (buff_stat & RX_T_DONE)  {
720 		status = prxd->status;
721 		pDB = aup->rx_db_inuse[aup->rx_head];
722 		au1000_update_rx_stats(dev, status);
723 		if (!(status & RX_ERROR))  {
724 
725 			/* good frame */
726 			frmlen = (status & RX_FRAME_LEN_MASK);
727 			frmlen -= 4; /* Remove FCS */
728 			skb = netdev_alloc_skb(dev, frmlen + 2);
729 			if (skb == NULL) {
730 				netdev_err(dev, "Memory squeeze, dropping packet.\n");
731 				dev->stats.rx_dropped++;
732 				continue;
733 			}
734 			skb_reserve(skb, 2);	/* 16 byte IP header align */
735 			skb_copy_to_linear_data(skb,
736 				(unsigned char *)pDB->vaddr, frmlen);
737 			skb_put(skb, frmlen);
738 			skb->protocol = eth_type_trans(skb, dev);
739 			netif_rx(skb);	/* pass the packet to upper layers */
740 		} else {
741 			if (au1000_debug > 4) {
742 				pr_err("rx_error(s):");
743 				if (status & RX_MISSED_FRAME)
744 					pr_cont(" miss");
745 				if (status & RX_WDOG_TIMER)
746 					pr_cont(" wdog");
747 				if (status & RX_RUNT)
748 					pr_cont(" runt");
749 				if (status & RX_OVERLEN)
750 					pr_cont(" overlen");
751 				if (status & RX_COLL)
752 					pr_cont(" coll");
753 				if (status & RX_MII_ERROR)
754 					pr_cont(" mii error");
755 				if (status & RX_CRC_ERROR)
756 					pr_cont(" crc error");
757 				if (status & RX_LEN_ERROR)
758 					pr_cont(" len error");
759 				if (status & RX_U_CNTRL_FRAME)
760 					pr_cont(" u control frame");
761 				pr_cont("\n");
762 			}
763 		}
764 		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
765 		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
766 		au_sync();
767 
768 		/* next descriptor */
769 		prxd = aup->rx_dma_ring[aup->rx_head];
770 		buff_stat = prxd->buff_stat;
771 	}
772 	return 0;
773 }
774 
775 static void au1000_update_tx_stats(struct net_device *dev, u32 status)
776 {
777 	struct au1000_private *aup = netdev_priv(dev);
778 	struct net_device_stats *ps = &dev->stats;
779 
780 	if (status & TX_FRAME_ABORTED) {
781 		if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
782 			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
783 				/* any other tx errors are only valid
784 				 * in half duplex mode
785 				 */
786 				ps->tx_errors++;
787 				ps->tx_aborted_errors++;
788 			}
789 		} else {
790 			ps->tx_errors++;
791 			ps->tx_aborted_errors++;
792 			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
793 				ps->tx_carrier_errors++;
794 		}
795 	}
796 }
797 
798 /*
799  * Called from the interrupt service routine to acknowledge
800  * the TX DONE bits.  This is a must if the irq is set up as
801  * edge triggered.
802  */
803 static void au1000_tx_ack(struct net_device *dev)
804 {
805 	struct au1000_private *aup = netdev_priv(dev);
806 	struct tx_dma *ptxd;
807 
808 	ptxd = aup->tx_dma_ring[aup->tx_tail];
809 
810 	while (ptxd->buff_stat & TX_T_DONE) {
811 		au1000_update_tx_stats(dev, ptxd->status);
812 		ptxd->buff_stat &= ~TX_T_DONE;
813 		ptxd->len = 0;
814 		au_sync();
815 
816 		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
817 		ptxd = aup->tx_dma_ring[aup->tx_tail];
818 
819 		if (aup->tx_full) {
820 			aup->tx_full = 0;
821 			netif_wake_queue(dev);
822 		}
823 	}
824 }
825 
826 /*
827  * Au1000 interrupt service routine.
828  */
829 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
830 {
831 	struct net_device *dev = dev_id;
832 
833 	/* Handle RX interrupts first to minimize chance of overrun */
834 
835 	au1000_rx(dev);
836 	au1000_tx_ack(dev);
837 	return IRQ_RETVAL(1);
838 }
839 
840 static int au1000_open(struct net_device *dev)
841 {
842 	int retval;
843 	struct au1000_private *aup = netdev_priv(dev);
844 
845 	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
846 
847 	retval = request_irq(dev->irq, au1000_interrupt, 0,
848 					dev->name, dev);
849 	if (retval) {
850 		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
851 		return retval;
852 	}
853 
854 	retval = au1000_init(dev);
855 	if (retval) {
856 		netdev_err(dev, "error in au1000_init\n");
857 		free_irq(dev->irq, dev);
858 		return retval;
859 	}
860 
861 	if (aup->phy_dev) {
862 		/* cause the PHY state machine to schedule a link state check */
863 		aup->phy_dev->state = PHY_CHANGELINK;
864 		phy_start(aup->phy_dev);
865 	}
866 
867 	netif_start_queue(dev);
868 
869 	netif_dbg(aup, drv, dev, "open: Initialization done.\n");
870 
871 	return 0;
872 }
873 
874 static int au1000_close(struct net_device *dev)
875 {
876 	unsigned long flags;
877 	struct au1000_private *const aup = netdev_priv(dev);
878 
879 	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
880 
881 	if (aup->phy_dev)
882 		phy_stop(aup->phy_dev);
883 
884 	spin_lock_irqsave(&aup->lock, flags);
885 
886 	au1000_reset_mac_unlocked(dev);
887 
888 	/* stop the device */
889 	netif_stop_queue(dev);
890 
891 	/* disable the interrupt */
892 	free_irq(dev->irq, dev);
893 	spin_unlock_irqrestore(&aup->lock, flags);
894 
895 	return 0;
896 }
897 
898 /*
899  * Au1000 transmit routine.
900  */
901 static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
902 {
903 	struct au1000_private *aup = netdev_priv(dev);
904 	struct net_device_stats *ps = &dev->stats;
905 	struct tx_dma *ptxd;
906 	u32 buff_stat;
907 	struct db_dest *pDB;
908 	int i;
909 
910 	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
911 				(unsigned)aup, skb->len,
912 				skb->data, aup->tx_head);
913 
914 	ptxd = aup->tx_dma_ring[aup->tx_head];
915 	buff_stat = ptxd->buff_stat;
916 	if (buff_stat & TX_DMA_ENABLE) {
917 		/* We've wrapped around and the transmitter is still busy */
918 		netif_stop_queue(dev);
919 		aup->tx_full = 1;
920 		return NETDEV_TX_BUSY;
921 	} else if (buff_stat & TX_T_DONE) {
922 		au1000_update_tx_stats(dev, ptxd->status);
923 		ptxd->len = 0;
924 	}
925 
926 	if (aup->tx_full) {
927 		aup->tx_full = 0;
928 		netif_wake_queue(dev);
929 	}
930 
931 	pDB = aup->tx_db_inuse[aup->tx_head];
932 	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
933 	if (skb->len < ETH_ZLEN) {
934 		for (i = skb->len; i < ETH_ZLEN; i++)
935 			((char *)pDB->vaddr)[i] = 0;
936 
937 		ptxd->len = ETH_ZLEN;
938 	} else
939 		ptxd->len = skb->len;
940 
941 	ps->tx_packets++;
942 	ps->tx_bytes += ptxd->len;
943 
944 	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
945 	au_sync();
946 	dev_kfree_skb(skb);
947 	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
948 	return NETDEV_TX_OK;
949 }
950 
951 /*
952  * The Tx ring has been full longer than the watchdog timeout
953  * value. The transmitter is probably hung.
954  */
955 static void au1000_tx_timeout(struct net_device *dev)
956 {
957 	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
958 	au1000_reset_mac(dev);
959 	au1000_init(dev);
960 	dev->trans_start = jiffies; /* prevent tx timeout */
961 	netif_wake_queue(dev);
962 }
963 
964 static void au1000_multicast_list(struct net_device *dev)
965 {
966 	struct au1000_private *aup = netdev_priv(dev);
967 	u32 reg;
968 
969 	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
970 	reg = readl(&aup->mac->control);
971 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
972 		reg |= MAC_PROMISCUOUS;
973 	} else if ((dev->flags & IFF_ALLMULTI)  ||
974 			   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
975 		reg |= MAC_PASS_ALL_MULTI;
976 		reg &= ~MAC_PROMISCUOUS;
977 		netdev_info(dev, "Pass all multicast\n");
978 	} else {
979 		struct netdev_hw_addr *ha;
980 		u32 mc_filter[2];	/* Multicast hash filter */
981 
982 		mc_filter[1] = mc_filter[0] = 0;
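		/* the upper 6 bits of each address's CRC-32 select one of the
		 * 64 bits in the multicast hash filter
		 */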
983 		netdev_for_each_mc_addr(ha, dev)
984 			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
985 					(long *)mc_filter);
986 		writel(mc_filter[1], &aup->mac->multi_hash_high);
987 		writel(mc_filter[0], &aup->mac->multi_hash_low);
988 		reg &= ~MAC_PROMISCUOUS;
989 		reg |= MAC_HASH_MODE;
990 	}
991 	writel(reg, &aup->mac->control);
992 }
993 
994 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
995 {
996 	struct au1000_private *aup = netdev_priv(dev);
997 
998 	if (!netif_running(dev))
999 		return -EINVAL;
1000 
1001 	if (!aup->phy_dev)
1002 		return -EINVAL; /* PHY not controllable */
1003 
1004 	return phy_mii_ioctl(aup->phy_dev, rq, cmd);
1005 }
1006 
1007 static const struct net_device_ops au1000_netdev_ops = {
1008 	.ndo_open		= au1000_open,
1009 	.ndo_stop		= au1000_close,
1010 	.ndo_start_xmit		= au1000_tx,
1011 	.ndo_set_rx_mode	= au1000_multicast_list,
1012 	.ndo_do_ioctl		= au1000_ioctl,
1013 	.ndo_tx_timeout		= au1000_tx_timeout,
1014 	.ndo_set_mac_address	= eth_mac_addr,
1015 	.ndo_validate_addr	= eth_validate_addr,
1016 	.ndo_change_mtu		= eth_change_mtu,
1017 };
1018 
1019 static int au1000_probe(struct platform_device *pdev)
1020 {
1021 	static unsigned version_printed;
1022 	struct au1000_private *aup = NULL;
1023 	struct au1000_eth_platform_data *pd;
1024 	struct net_device *dev = NULL;
1025 	struct db_dest *pDB, *pDBfree;
1026 	int irq, i, err = 0;
1027 	struct resource *base, *macen, *macdma;
1028 
1029 	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1030 	if (!base) {
1031 		dev_err(&pdev->dev, "failed to retrieve base register\n");
1032 		err = -ENODEV;
1033 		goto out;
1034 	}
1035 
1036 	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1037 	if (!macen) {
1038 		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1039 		err = -ENODEV;
1040 		goto out;
1041 	}
1042 
1043 	irq = platform_get_irq(pdev, 0);
1044 	if (irq < 0) {
1045 		dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1046 		err = -ENODEV;
1047 		goto out;
1048 	}
1049 
1050 	macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1051 	if (!macdma) {
1052 		dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1053 		err = -ENODEV;
1054 		goto out;
1055 	}
1056 
1057 	if (!request_mem_region(base->start, resource_size(base),
1058 							pdev->name)) {
1059 		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1060 		err = -ENXIO;
1061 		goto out;
1062 	}
1063 
1064 	if (!request_mem_region(macen->start, resource_size(macen),
1065 							pdev->name)) {
1066 		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1067 		err = -ENXIO;
1068 		goto err_request;
1069 	}
1070 
1071 	if (!request_mem_region(macdma->start, resource_size(macdma),
1072 							pdev->name)) {
1073 		dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1074 		err = -ENXIO;
1075 		goto err_macdma;
1076 	}
1077 
1078 	dev = alloc_etherdev(sizeof(struct au1000_private));
1079 	if (!dev) {
1080 		err = -ENOMEM;
1081 		goto err_alloc;
1082 	}
1083 
1084 	SET_NETDEV_DEV(dev, &pdev->dev);
1085 	platform_set_drvdata(pdev, dev);
1086 	aup = netdev_priv(dev);
1087 
1088 	spin_lock_init(&aup->lock);
1089 	aup->msg_enable = (au1000_debug < 4 ?
1090 				AU1000_DEF_MSG_ENABLE : au1000_debug);
1091 
1092 	/* Allocate the data buffers
1093 	 * Snooping works fine with eth on all au1xxx
1094 	 */
1095 	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1096 						(NUM_TX_BUFFS + NUM_RX_BUFFS),
1097 						&aup->dma_addr,	0);
1098 	if (!aup->vaddr) {
1099 		dev_err(&pdev->dev, "failed to allocate data buffers\n");
1100 		err = -ENOMEM;
1101 		goto err_vaddr;
1102 	}
1103 
1104 	/* aup->mac is the base address of the MAC's registers */
1105 	aup->mac = (struct mac_reg *)
1106 			ioremap_nocache(base->start, resource_size(base));
1107 	if (!aup->mac) {
1108 		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1109 		err = -ENXIO;
1110 		goto err_remap1;
1111 	}
1112 
1113 	/* Setup some variables for quick register address access */
1114 	aup->enable = (u32 *)ioremap_nocache(macen->start,
1115 						resource_size(macen));
1116 	if (!aup->enable) {
1117 		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1118 		err = -ENXIO;
1119 		goto err_remap2;
1120 	}
1121 	aup->mac_id = pdev->id;
1122 
1123 	aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1124 	if (!aup->macdma) {
1125 		dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1126 		err = -ENXIO;
1127 		goto err_remap3;
1128 	}
1129 
1130 	au1000_setup_hw_rings(aup, aup->macdma);
1131 
1132 	writel(0, aup->enable);
1133 	aup->mac_enabled = 0;
1134 
1135 	pd = pdev->dev.platform_data;
1136 	if (!pd) {
1137 		dev_info(&pdev->dev, "no platform_data passed,"
1138 					" PHY search on MAC0\n");
1139 		aup->phy1_search_mac0 = 1;
1140 	} else {
1141 		if (is_valid_ether_addr(pd->mac)) {
1142 			memcpy(dev->dev_addr, pd->mac, 6);
1143 		} else {
1144 			/* Set a random MAC since platform_data did not provide a valid one. */
1145 			eth_hw_addr_random(dev);
1146 		}
1147 
1148 		aup->phy_static_config = pd->phy_static_config;
1149 		aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1150 		aup->phy1_search_mac0 = pd->phy1_search_mac0;
1151 		aup->phy_addr = pd->phy_addr;
1152 		aup->phy_busid = pd->phy_busid;
1153 		aup->phy_irq = pd->phy_irq;
1154 	}
1155 
1156 	if (aup->phy_busid > 0) {
1157 		dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
1158 		err = -ENODEV;
1159 		goto err_mdiobus_alloc;
1160 	}
1161 
1162 	aup->mii_bus = mdiobus_alloc();
1163 	if (aup->mii_bus == NULL) {
1164 		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1165 		err = -ENOMEM;
1166 		goto err_mdiobus_alloc;
1167 	}
1168 
1169 	aup->mii_bus->priv = dev;
1170 	aup->mii_bus->read = au1000_mdiobus_read;
1171 	aup->mii_bus->write = au1000_mdiobus_write;
1172 	aup->mii_bus->reset = au1000_mdiobus_reset;
1173 	aup->mii_bus->name = "au1000_eth_mii";
1174 	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1175 		pdev->name, aup->mac_id);
1176 	aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1177 	if (aup->mii_bus->irq == NULL) {
1178 		err = -ENOMEM;
1179 		goto err_out;
1180 	}
1181 
1182 	for (i = 0; i < PHY_MAX_ADDR; ++i)
1183 		aup->mii_bus->irq[i] = PHY_POLL;
1184 	/* if known, set corresponding PHY IRQs */
1185 	if (aup->phy_static_config)
1186 		if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1187 			aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1188 
1189 	err = mdiobus_register(aup->mii_bus);
1190 	if (err) {
1191 		dev_err(&pdev->dev, "failed to register MDIO bus\n");
1192 		goto err_mdiobus_reg;
1193 	}
1194 
1195 	err = au1000_mii_probe(dev);
1196 	if (err != 0)
1197 		goto err_out;
1198 
1199 	pDBfree = NULL;
1200 	/* setup the data buffer descriptors and attach a buffer to each one */
1201 	pDB = aup->db;
1202 	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1203 		pDB->pnext = pDBfree;
1204 		pDBfree = pDB;
1205 		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1206 		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1207 		pDB++;
1208 	}
1209 	aup->pDBfree = pDBfree;
1210 
1211 	err = -ENODEV;
1212 	for (i = 0; i < NUM_RX_DMA; i++) {
1213 		pDB = au1000_GetFreeDB(aup);
1214 		if (!pDB)
1215 			goto err_out;
1216 
1217 		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1218 		aup->rx_db_inuse[i] = pDB;
1219 	}
1220 
1221 	err = -ENODEV;
1222 	for (i = 0; i < NUM_TX_DMA; i++) {
1223 		pDB = au1000_GetFreeDB(aup);
1224 		if (!pDB)
1225 			goto err_out;
1226 
1227 		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1228 		aup->tx_dma_ring[i]->len = 0;
1229 		aup->tx_db_inuse[i] = pDB;
1230 	}
1231 
1232 	dev->base_addr = base->start;
1233 	dev->irq = irq;
1234 	dev->netdev_ops = &au1000_netdev_ops;
1235 	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1236 	dev->watchdog_timeo = ETH_TX_TIMEOUT;
1237 
1238 	/*
1239 	 * The boot code uses the ethernet controller, so reset it to start
1240 	 * fresh.  au1000_init() expects that the device is in reset state.
1241 	 */
1242 	au1000_reset_mac(dev);
1243 
1244 	err = register_netdev(dev);
1245 	if (err) {
1246 		netdev_err(dev, "Cannot register net device, aborting.\n");
1247 		goto err_out;
1248 	}
1249 
1250 	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1251 			(unsigned long)base->start, irq);
1252 	if (version_printed++ == 0)
1253 		pr_info("%s version %s %s\n",
1254 					DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1255 
1256 	return 0;
1257 
1258 err_out:
1259 	if (aup->mii_bus != NULL)
1260 		mdiobus_unregister(aup->mii_bus);
1261 
1262 	/* here we should have a valid dev plus the aup->* register mappings,
1263 	 * so we can reset the MAC properly.
1264 	 */
1265 	au1000_reset_mac(dev);
1266 
1267 	for (i = 0; i < NUM_RX_DMA; i++) {
1268 		if (aup->rx_db_inuse[i])
1269 			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1270 	}
1271 	for (i = 0; i < NUM_TX_DMA; i++) {
1272 		if (aup->tx_db_inuse[i])
1273 			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1274 	}
1275 err_mdiobus_reg:
1276 	mdiobus_free(aup->mii_bus);
1277 err_mdiobus_alloc:
1278 	iounmap(aup->macdma);
1279 err_remap3:
1280 	iounmap(aup->enable);
1281 err_remap2:
1282 	iounmap(aup->mac);
1283 err_remap1:
1284 	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1285 			     (void *)aup->vaddr, aup->dma_addr);
1286 err_vaddr:
1287 	free_netdev(dev);
1288 err_alloc:
1289 	release_mem_region(macdma->start, resource_size(macdma));
1290 err_macdma:
1291 	release_mem_region(macen->start, resource_size(macen));
1292 err_request:
1293 	release_mem_region(base->start, resource_size(base));
1294 out:
1295 	return err;
1296 }
1297 
1298 static int au1000_remove(struct platform_device *pdev)
1299 {
1300 	struct net_device *dev = platform_get_drvdata(pdev);
1301 	struct au1000_private *aup = netdev_priv(dev);
1302 	int i;
1303 	struct resource *base, *macen;
1304 
1305 	platform_set_drvdata(pdev, NULL);
1306 
1307 	unregister_netdev(dev);
1308 	mdiobus_unregister(aup->mii_bus);
1309 	mdiobus_free(aup->mii_bus);
1310 
1311 	for (i = 0; i < NUM_RX_DMA; i++)
1312 		if (aup->rx_db_inuse[i])
1313 			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1314 
1315 	for (i = 0; i < NUM_TX_DMA; i++)
1316 		if (aup->tx_db_inuse[i])
1317 			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1318 
1319 	dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1320 			(NUM_TX_BUFFS + NUM_RX_BUFFS),
1321 			(void *)aup->vaddr, aup->dma_addr);
1322 
1323 	iounmap(aup->macdma);
1324 	iounmap(aup->mac);
1325 	iounmap(aup->enable);
1326 
1327 	base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1328 	release_mem_region(base->start, resource_size(base));
1329 
1330 	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1331 	release_mem_region(base->start, resource_size(base));
1332 
1333 	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1334 	release_mem_region(macen->start, resource_size(macen));
1335 
1336 	free_netdev(dev);
1337 
1338 	return 0;
1339 }
1340 
1341 static struct platform_driver au1000_eth_driver = {
1342 	.probe  = au1000_probe,
1343 	.remove = au1000_remove,
1344 	.driver = {
1345 		.name   = "au1000-eth",
1346 		.owner  = THIS_MODULE,
1347 	},
1348 };
1349 
1350 module_platform_driver(au1000_eth_driver);
1351 
1352 MODULE_ALIAS("platform:au1000-eth");
1353