1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      Davicom DM9000 Fast Ethernet driver for Linux.
4  * 	Copyright (C) 1997  Sten Wang
5  *
6  * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
7  *
8  * Additional updates, Copyright:
9  *	Ben Dooks <ben@simtec.co.uk>
10  *	Sascha Hauer <s.hauer@pengutronix.de>
11  */
12 
13 #include <linux/module.h>
14 #include <linux/ioport.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/interrupt.h>
18 #include <linux/skbuff.h>
19 #include <linux/spinlock.h>
20 #include <linux/crc32.h>
21 #include <linux/mii.h>
22 #include <linux/of.h>
23 #include <linux/of_net.h>
24 #include <linux/ethtool.h>
25 #include <linux/dm9000.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/gpio.h>
32 #include <linux/of_gpio.h>
33 
34 #include <asm/delay.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 
38 #include "dm9000.h"
39 
40 /* Board/System/Debug information/definition ---------------- */
41 
42 #define DM9000_PHY		0x40	/* PHY address 0x01 */
43 
44 #define CARDNAME	"dm9000"
45 #define DRV_VERSION	"1.31"
46 
47 /*
48  * Transmit timeout, default 5 seconds.
49  */
50 static int watchdog = 5000;
51 module_param(watchdog, int, 0400);
52 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
53 
54 /*
55  * Debug messages level
56  */
57 static int debug;
58 module_param(debug, int, 0644);
59 MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");
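/* Example (illustrative, not part of the driver): both parameters can be
 * given at load time, e.g. "modprobe dm9000 watchdog=10000 debug=3" for a
 * 10 second transmit timeout and a more verbose debug level.
 */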
60 
61 /* DM9000 register address locking.
62  *
63  * The DM9000 uses an address register to control where data written
64  * to the data register goes. This means that the address register
65  * must be preserved over interrupts or similar calls.
66  *
67  * During interrupt and other critical calls, a spinlock is used to
68  * protect the system, but the calls themselves save the address
69  * in the address register in case they are interrupting another
70  * access to the device.
71  *
72  * For general accesses a lock is provided so that calls which are
73  * allowed to sleep are serialised so that the address register does
74  * not need to be saved. This lock also serves to serialise access
75  * to the EEPROM and PHY access registers which are shared between
76  * these two devices.
77  */
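/* Sketch of the pattern used throughout this file (it mirrors
 * dm9000_phy_read() below; not additional driver code, SOME_REG is a
 * placeholder):
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);		// remember current index
 *	iow(db, SOME_REG, value);		// indexed register access
 *	writeb(reg_save, db->io_addr);		// restore interrupted access
 *	spin_unlock_irqrestore(&db->lock, flags);
 */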
78 
79 /* The driver supports the original DM9000E, and now the two newer
80  * devices, DM9000A and DM9000B.
81  */
82 
83 enum dm9000_type {
84 	TYPE_DM9000E,	/* original DM9000 */
85 	TYPE_DM9000A,
86 	TYPE_DM9000B
87 };
88 
89 /* Structure/enum declaration ------------------------------- */
90 struct board_info {
91 
92 	void __iomem	*io_addr;	/* Register I/O base address */
93 	void __iomem	*io_data;	/* Data I/O address */
94 	u16		 irq;		/* IRQ */
95 
96 	u16		tx_pkt_cnt;
97 	u16		queue_pkt_len;
98 	u16		queue_start_addr;
99 	u16		queue_ip_summed;
100 	u16		dbug_cnt;
101 	u8		io_mode;		/* 0:word, 2:byte */
102 	u8		phy_addr;
103 	u8		imr_all;
104 
105 	unsigned int	flags;
106 	unsigned int	in_timeout:1;
107 	unsigned int	in_suspend:1;
108 	unsigned int	wake_supported:1;
109 
110 	enum dm9000_type type;
111 
112 	void (*inblk)(void __iomem *port, void *data, int length);
113 	void (*outblk)(void __iomem *port, void *data, int length);
114 	void (*dumpblk)(void __iomem *port, int length);
115 
116 	struct device	*dev;	     /* parent device */
117 
118 	struct resource	*addr_res;   /* resources found */
119 	struct resource *data_res;
120 	struct resource	*addr_req;   /* resources requested */
121 	struct resource *data_req;
122 
123 	int		 irq_wake;
124 
125 	struct mutex	 addr_lock;	/* phy and eeprom access lock */
126 
127 	struct delayed_work phy_poll;
128 	struct net_device  *ndev;
129 
130 	spinlock_t	lock;
131 
132 	struct mii_if_info mii;
133 	u32		msg_enable;
134 	u32		wake_state;
135 
136 	int		ip_summed;
137 };
138 
139 /* debug code */
140 
141 #define dm9000_dbg(db, lev, msg...) do {		\
142 	if ((lev) < debug) {				\
143 		dev_dbg(db->dev, msg);			\
144 	}						\
145 } while (0)
146 
147 static inline struct board_info *to_dm9000_board(struct net_device *dev)
148 {
149 	return netdev_priv(dev);
150 }
151 
152 /* DM9000 network board routine ---------------------------- */
153 
154 /*
155  *   Read a byte from I/O port
156  */
157 static u8
158 ior(struct board_info *db, int reg)
159 {
160 	writeb(reg, db->io_addr);
161 	return readb(db->io_data);
162 }
163 
164 /*
165  *   Write a byte to I/O port
166  */
167 
168 static void
169 iow(struct board_info *db, int reg, int value)
170 {
171 	writeb(reg, db->io_addr);
172 	writeb(value, db->io_data);
173 }
174 
175 static void
176 dm9000_reset(struct board_info *db)
177 {
178 	dev_dbg(db->dev, "resetting device\n");
179 
180 	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
181 	 * The essential point is that we have to do a double reset, and the
182 	 * instruction is to set LBK into MAC internal loopback mode.
183 	 */
184 	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
185 	udelay(100); /* Application note says at least 20 us */
186 	if (ior(db, DM9000_NCR) & 1)
187 		dev_err(db->dev, "dm9000 did not respond to first reset\n");
188 
189 	iow(db, DM9000_NCR, 0);
190 	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
191 	udelay(100);
192 	if (ior(db, DM9000_NCR) & 1)
193 		dev_err(db->dev, "dm9000 did not respond to second reset\n");
194 }
195 
196 /* routines for sending block to chip */
197 
198 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
199 {
200 	iowrite8_rep(reg, data, count);
201 }
202 
203 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
204 {
205 	iowrite16_rep(reg, data, (count+1) >> 1);
206 }
207 
208 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
209 {
210 	iowrite32_rep(reg, data, (count+3) >> 2);
211 }
212 
213 /* input block from chip to memory */
214 
215 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
216 {
217 	ioread8_rep(reg, data, count);
218 }
219 
220 
221 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
222 {
223 	ioread16_rep(reg, data, (count+1) >> 1);
224 }
225 
226 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
227 {
228 	ioread32_rep(reg, data, (count+3) >> 2);
229 }
230 
231 /* dump block from chip to null */
232 
233 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
234 {
235 	int i;
236 	int tmp;
237 
238 	for (i = 0; i < count; i++)
239 		tmp = readb(reg);
240 }
241 
242 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
243 {
244 	int i;
245 	int tmp;
246 
247 	count = (count + 1) >> 1;
248 
249 	for (i = 0; i < count; i++)
250 		tmp = readw(reg);
251 }
252 
253 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
254 {
255 	int i;
256 	int tmp;
257 
258 	count = (count + 3) >> 2;
259 
260 	for (i = 0; i < count; i++)
261 		tmp = readl(reg);
262 }
263 
264 /*
265  * Sleep, using msleep() normally; if we are suspending or in the
266  * timeout handler (where sleeping is not allowed), use mdelay() instead.
267  */
268 static void dm9000_msleep(struct board_info *db, unsigned int ms)
269 {
270 	if (db->in_suspend || db->in_timeout)
271 		mdelay(ms);
272 	else
273 		msleep(ms);
274 }
275 
276 /* Read a word from phyxcer */
277 static int
278 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
279 {
280 	struct board_info *db = netdev_priv(dev);
281 	unsigned long flags;
282 	unsigned int reg_save;
283 	int ret;
284 
285 	mutex_lock(&db->addr_lock);
286 
287 	spin_lock_irqsave(&db->lock, flags);
288 
289 	/* Save previous register address */
290 	reg_save = readb(db->io_addr);
291 
292 	/* Fill the phyxcer register into REG_0C */
293 	/* Put the phyxcer register address into EPAR (REG_0C) */
294 
295 	/* Issue phyxcer read command */
296 	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
297 
298 	writeb(reg_save, db->io_addr);
299 	spin_unlock_irqrestore(&db->lock, flags);
300 
301 	dm9000_msleep(db, 1);		/* Wait read complete */
302 
303 	spin_lock_irqsave(&db->lock, flags);
304 	reg_save = readb(db->io_addr);
305 
306 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */
307 
308 	/* The read data is held in REG_0D & REG_0E */
309 	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
310 
311 	/* restore the previous address */
312 	writeb(reg_save, db->io_addr);
313 	spin_unlock_irqrestore(&db->lock, flags);
314 
315 	mutex_unlock(&db->addr_lock);
316 
317 	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
318 	return ret;
319 }
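/* Usage note (illustrative): the mii layer calls this through
 * db->mii.mdio_read, e.g. "dm9000_phy_read(dev, 0, MII_BMSR) & BMSR_LSTATUS"
 * tests the link bit; the PHY address argument is ignored because the
 * internal PHY is at address 1 (see DM9000_PHY above).
 */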
320 
321 /* Write a word to phyxcer */
322 static void
323 dm9000_phy_write(struct net_device *dev,
324 		 int phyaddr_unused, int reg, int value)
325 {
326 	struct board_info *db = netdev_priv(dev);
327 	unsigned long flags;
328 	unsigned long reg_save;
329 
330 	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
331 	if (!db->in_timeout)
332 		mutex_lock(&db->addr_lock);
333 
334 	spin_lock_irqsave(&db->lock, flags);
335 
336 	/* Save previous register address */
337 	reg_save = readb(db->io_addr);
338 
339 	/* Fill the phyxcer register into REG_0C */
340 	/* Put the phyxcer register address into EPAR (REG_0C) */
341 
342 	/* Put the data to be written into REG_0D & REG_0E */
343 	iow(db, DM9000_EPDRL, value);
344 	iow(db, DM9000_EPDRH, value >> 8);
345 
346 	/* Issue phyxcer write command */
347 	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
348 
349 	writeb(reg_save, db->io_addr);
350 	spin_unlock_irqrestore(&db->lock, flags);
351 
352 	dm9000_msleep(db, 1);		/* Wait write complete */
353 
354 	spin_lock_irqsave(&db->lock, flags);
355 	reg_save = readb(db->io_addr);
356 
357 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */
358 
359 	/* restore the previous address */
360 	writeb(reg_save, db->io_addr);
361 
362 	spin_unlock_irqrestore(&db->lock, flags);
363 	if (!db->in_timeout)
364 		mutex_unlock(&db->addr_lock);
365 }
366 
367 /* dm9000_set_io
368  *
369  * select the specified set of io routines to use with the
370  * device
371  */
372 
373 static void dm9000_set_io(struct board_info *db, int byte_width)
374 {
375 	/* use the size of the data resource to work out what IO
376 	 * routines we want to use
377 	 */
378 
379 	switch (byte_width) {
380 	case 1:
381 		db->dumpblk = dm9000_dumpblk_8bit;
382 		db->outblk  = dm9000_outblk_8bit;
383 		db->inblk   = dm9000_inblk_8bit;
384 		break;
385 
386 
387 	case 3:
388 		dev_dbg(db->dev, "3 byte IO, falling back to 16bit\n");
389 		/* fall through */
390 	case 2:
391 		db->dumpblk = dm9000_dumpblk_16bit;
392 		db->outblk  = dm9000_outblk_16bit;
393 		db->inblk   = dm9000_inblk_16bit;
394 		break;
395 
396 	case 4:
397 	default:
398 		db->dumpblk = dm9000_dumpblk_32bit;
399 		db->outblk  = dm9000_outblk_32bit;
400 		db->inblk   = dm9000_inblk_32bit;
401 		break;
402 	}
403 }
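/* For example, a board wiring the chip to a 16-bit data bus exposes a
 * two-byte data resource; dm9000_probe() passes resource_size(db->data_res)
 * here, selecting the 16-bit inblk/outblk/dumpblk helpers unless the
 * platform data overrides the width.
 */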
404 
405 static void dm9000_schedule_poll(struct board_info *db)
406 {
407 	if (db->type == TYPE_DM9000E)
408 		schedule_delayed_work(&db->phy_poll, HZ * 2);
409 }
410 
411 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
412 {
413 	struct board_info *dm = to_dm9000_board(dev);
414 
415 	if (!netif_running(dev))
416 		return -EINVAL;
417 
418 	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
419 }
420 
421 static unsigned int
422 dm9000_read_locked(struct board_info *db, int reg)
423 {
424 	unsigned long flags;
425 	unsigned int ret;
426 
427 	spin_lock_irqsave(&db->lock, flags);
428 	ret = ior(db, reg);
429 	spin_unlock_irqrestore(&db->lock, flags);
430 
431 	return ret;
432 }
433 
434 static int dm9000_wait_eeprom(struct board_info *db)
435 {
436 	unsigned int status;
437 	int timeout = 8;	/* wait max 8msec */
438 
439 	/* The DM9000 data sheets say we should be able to
440 	 * poll the ERRE bit in EPCR to wait for the EEPROM
441 	 * operation. From testing several chips, this bit
442 	 * does not seem to work.
443 	 *
444 	 * We attempt to use the bit, but fall back to the
445 	 * timeout (which is why we do not return an error
446 	 * on expiry) to say that the EEPROM operation has
447 	 * completed.
448 	 */
449 
450 	while (1) {
451 		status = dm9000_read_locked(db, DM9000_EPCR);
452 
453 		if ((status & EPCR_ERRE) == 0)
454 			break;
455 
456 		msleep(1);
457 
458 		if (timeout-- < 0) {
459 			dev_dbg(db->dev, "timeout waiting EEPROM\n");
460 			break;
461 		}
462 	}
463 
464 	return 0;
465 }
466 
467 /*
468  *  Read a word data from EEPROM
469  */
470 static void
471 dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
472 {
473 	unsigned long flags;
474 
475 	if (db->flags & DM9000_PLATF_NO_EEPROM) {
476 		to[0] = 0xff;
477 		to[1] = 0xff;
478 		return;
479 	}
480 
481 	mutex_lock(&db->addr_lock);
482 
483 	spin_lock_irqsave(&db->lock, flags);
484 
485 	iow(db, DM9000_EPAR, offset);
486 	iow(db, DM9000_EPCR, EPCR_ERPRR);
487 
488 	spin_unlock_irqrestore(&db->lock, flags);
489 
490 	dm9000_wait_eeprom(db);
491 
492 	/* delay for at least 150 us */
493 	msleep(1);
494 
495 	spin_lock_irqsave(&db->lock, flags);
496 
497 	iow(db, DM9000_EPCR, 0x0);
498 
499 	to[0] = ior(db, DM9000_EPDRL);
500 	to[1] = ior(db, DM9000_EPDRH);
501 
502 	spin_unlock_irqrestore(&db->lock, flags);
503 
504 	mutex_unlock(&db->addr_lock);
505 }
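/* Each EEPROM offset addresses a 16-bit word, so byte offset i maps to word
 * offset i / 2; dm9000_probe() reads the 6-byte MAC address as three such
 * words (descriptive note, mirroring the probe code below).
 */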
506 
507 /*
508  * Write a word data to SROM
509  */
510 static void
511 dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
512 {
513 	unsigned long flags;
514 
515 	if (db->flags & DM9000_PLATF_NO_EEPROM)
516 		return;
517 
518 	mutex_lock(&db->addr_lock);
519 
520 	spin_lock_irqsave(&db->lock, flags);
521 	iow(db, DM9000_EPAR, offset);
522 	iow(db, DM9000_EPDRH, data[1]);
523 	iow(db, DM9000_EPDRL, data[0]);
524 	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
525 	spin_unlock_irqrestore(&db->lock, flags);
526 
527 	dm9000_wait_eeprom(db);
528 
529 	mdelay(1);	/* wait at least 150 us to clear */
530 
531 	spin_lock_irqsave(&db->lock, flags);
532 	iow(db, DM9000_EPCR, 0);
533 	spin_unlock_irqrestore(&db->lock, flags);
534 
535 	mutex_unlock(&db->addr_lock);
536 }
537 
538 /* ethtool ops */
539 
540 static void dm9000_get_drvinfo(struct net_device *dev,
541 			       struct ethtool_drvinfo *info)
542 {
543 	struct board_info *dm = to_dm9000_board(dev);
544 
545 	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
546 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
547 	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
548 		sizeof(info->bus_info));
549 }
550 
551 static u32 dm9000_get_msglevel(struct net_device *dev)
552 {
553 	struct board_info *dm = to_dm9000_board(dev);
554 
555 	return dm->msg_enable;
556 }
557 
558 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
559 {
560 	struct board_info *dm = to_dm9000_board(dev);
561 
562 	dm->msg_enable = value;
563 }
564 
565 static int dm9000_get_link_ksettings(struct net_device *dev,
566 				     struct ethtool_link_ksettings *cmd)
567 {
568 	struct board_info *dm = to_dm9000_board(dev);
569 
570 	mii_ethtool_get_link_ksettings(&dm->mii, cmd);
571 	return 0;
572 }
573 
574 static int dm9000_set_link_ksettings(struct net_device *dev,
575 				     const struct ethtool_link_ksettings *cmd)
576 {
577 	struct board_info *dm = to_dm9000_board(dev);
578 
579 	return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
580 }
581 
582 static int dm9000_nway_reset(struct net_device *dev)
583 {
584 	struct board_info *dm = to_dm9000_board(dev);
585 	return mii_nway_restart(&dm->mii);
586 }
587 
588 static int dm9000_set_features(struct net_device *dev,
589 	netdev_features_t features)
590 {
591 	struct board_info *dm = to_dm9000_board(dev);
592 	netdev_features_t changed = dev->features ^ features;
593 	unsigned long flags;
594 
595 	if (!(changed & NETIF_F_RXCSUM))
596 		return 0;
597 
598 	spin_lock_irqsave(&dm->lock, flags);
599 	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
600 	spin_unlock_irqrestore(&dm->lock, flags);
601 
602 	return 0;
603 }
604 
605 static u32 dm9000_get_link(struct net_device *dev)
606 {
607 	struct board_info *dm = to_dm9000_board(dev);
608 	u32 ret;
609 
610 	if (dm->flags & DM9000_PLATF_EXT_PHY)
611 		ret = mii_link_ok(&dm->mii);
612 	else
613 		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
614 
615 	return ret;
616 }
617 
618 #define DM_EEPROM_MAGIC		(0x444D394B)
619 
620 static int dm9000_get_eeprom_len(struct net_device *dev)
621 {
622 	return 128;
623 }
624 
625 static int dm9000_get_eeprom(struct net_device *dev,
626 			     struct ethtool_eeprom *ee, u8 *data)
627 {
628 	struct board_info *dm = to_dm9000_board(dev);
629 	int offset = ee->offset;
630 	int len = ee->len;
631 	int i;
632 
633 	/* EEPROM access is aligned to two bytes */
634 
635 	if ((len & 1) != 0 || (offset & 1) != 0)
636 		return -EINVAL;
637 
638 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
639 		return -ENOENT;
640 
641 	ee->magic = DM_EEPROM_MAGIC;
642 
643 	for (i = 0; i < len; i += 2)
644 		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
645 
646 	return 0;
647 }
648 
649 static int dm9000_set_eeprom(struct net_device *dev,
650 			     struct ethtool_eeprom *ee, u8 *data)
651 {
652 	struct board_info *dm = to_dm9000_board(dev);
653 	int offset = ee->offset;
654 	int len = ee->len;
655 	int done;
656 
657 	/* EEPROM access is aligned to two bytes */
658 
659 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
660 		return -ENOENT;
661 
662 	if (ee->magic != DM_EEPROM_MAGIC)
663 		return -EINVAL;
664 
665 	while (len > 0) {
666 		if (len & 1 || offset & 1) {
667 			int which = offset & 1;
668 			u8 tmp[2];
669 
670 			dm9000_read_eeprom(dm, offset / 2, tmp);
671 			tmp[which] = *data;
672 			dm9000_write_eeprom(dm, offset / 2, tmp);
673 
674 			done = 1;
675 		} else {
676 			dm9000_write_eeprom(dm, offset / 2, data);
677 			done = 2;
678 		}
679 
680 		data += done;
681 		offset += done;
682 		len -= done;
683 	}
684 
685 	return 0;
686 }
687 
688 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
689 {
690 	struct board_info *dm = to_dm9000_board(dev);
691 
692 	memset(w, 0, sizeof(struct ethtool_wolinfo));
693 
694 	/* note, we could probably support wake-phy too */
695 	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
696 	w->wolopts = dm->wake_state;
697 }
698 
699 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
700 {
701 	struct board_info *dm = to_dm9000_board(dev);
702 	unsigned long flags;
703 	u32 opts = w->wolopts;
704 	u32 wcr = 0;
705 
706 	if (!dm->wake_supported)
707 		return -EOPNOTSUPP;
708 
709 	if (opts & ~WAKE_MAGIC)
710 		return -EINVAL;
711 
712 	if (opts & WAKE_MAGIC)
713 		wcr |= WCR_MAGICEN;
714 
715 	mutex_lock(&dm->addr_lock);
716 
717 	spin_lock_irqsave(&dm->lock, flags);
718 	iow(dm, DM9000_WCR, wcr);
719 	spin_unlock_irqrestore(&dm->lock, flags);
720 
721 	mutex_unlock(&dm->addr_lock);
722 
723 	if (dm->wake_state != opts) {
724 		/* change in wol state, update IRQ state */
725 
726 		if (!dm->wake_state)
727 			irq_set_irq_wake(dm->irq_wake, 1);
728 		else if (dm->wake_state && !opts)
729 			irq_set_irq_wake(dm->irq_wake, 0);
730 	}
731 
732 	dm->wake_state = opts;
733 	return 0;
734 }
735 
736 static const struct ethtool_ops dm9000_ethtool_ops = {
737 	.get_drvinfo		= dm9000_get_drvinfo,
738 	.get_msglevel		= dm9000_get_msglevel,
739 	.set_msglevel		= dm9000_set_msglevel,
740 	.nway_reset		= dm9000_nway_reset,
741 	.get_link		= dm9000_get_link,
742 	.get_wol		= dm9000_get_wol,
743 	.set_wol		= dm9000_set_wol,
744 	.get_eeprom_len		= dm9000_get_eeprom_len,
745 	.get_eeprom		= dm9000_get_eeprom,
746 	.set_eeprom		= dm9000_set_eeprom,
747 	.get_link_ksettings	= dm9000_get_link_ksettings,
748 	.set_link_ksettings	= dm9000_set_link_ksettings,
749 };
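/* Userspace example (illustrative, interface name assumed): these ops back
 * the usual ethtool commands, e.g. "ethtool -e eth0" dumps the attached
 * EEPROM and "ethtool -s eth0 wol g" arms magic-packet wake-up when
 * wake_supported is set.
 */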
750 
751 static void dm9000_show_carrier(struct board_info *db,
752 				unsigned carrier, unsigned nsr)
753 {
754 	int lpa;
755 	struct net_device *ndev = db->ndev;
756 	struct mii_if_info *mii = &db->mii;
757 	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
758 
759 	if (carrier) {
760 		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
761 		dev_info(db->dev,
762 			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
763 			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
764 			 (ncr & NCR_FDX) ? "full" : "half", lpa);
765 	} else {
766 		dev_info(db->dev, "%s: link down\n", ndev->name);
767 	}
768 }
769 
770 static void
771 dm9000_poll_work(struct work_struct *w)
772 {
773 	struct delayed_work *dw = to_delayed_work(w);
774 	struct board_info *db = container_of(dw, struct board_info, phy_poll);
775 	struct net_device *ndev = db->ndev;
776 
777 	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
778 	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
779 		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
780 		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
781 		unsigned new_carrier;
782 
783 		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
784 
785 		if (old_carrier != new_carrier) {
786 			if (netif_msg_link(db))
787 				dm9000_show_carrier(db, new_carrier, nsr);
788 
789 			if (!new_carrier)
790 				netif_carrier_off(ndev);
791 			else
792 				netif_carrier_on(ndev);
793 		}
794 	} else
795 		mii_check_media(&db->mii, netif_msg_link(db), 0);
796 
797 	if (netif_running(ndev))
798 		dm9000_schedule_poll(db);
799 }
800 
801 /* dm9000_release_board
802  *
803  * release a board, and any mapped resources
804  */
805 
806 static void
807 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
808 {
809 	/* unmap our resources */
810 
811 	iounmap(db->io_addr);
812 	iounmap(db->io_data);
813 
814 	/* release the resources */
815 
816 	if (db->data_req)
817 		release_resource(db->data_req);
818 	kfree(db->data_req);
819 
820 	if (db->addr_req)
821 		release_resource(db->addr_req);
822 	kfree(db->addr_req);
823 }
824 
825 static unsigned char dm9000_type_to_char(enum dm9000_type type)
826 {
827 	switch (type) {
828 	case TYPE_DM9000E: return 'e';
829 	case TYPE_DM9000A: return 'a';
830 	case TYPE_DM9000B: return 'b';
831 	}
832 
833 	return '?';
834 }
835 
836 /*
837  *  Set DM9000 multicast address
838  */
839 static void
840 dm9000_hash_table_unlocked(struct net_device *dev)
841 {
842 	struct board_info *db = netdev_priv(dev);
843 	struct netdev_hw_addr *ha;
844 	int i, oft;
845 	u32 hash_val;
846 	u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
847 	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
848 
849 	dm9000_dbg(db, 1, "entering %s\n", __func__);
850 
851 	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
852 		iow(db, oft, dev->dev_addr[i]);
853 
854 	if (dev->flags & IFF_PROMISC)
855 		rcr |= RCR_PRMSC;
856 
857 	if (dev->flags & IFF_ALLMULTI)
858 		rcr |= RCR_ALL;
859 
860 	/* Set the multicast addresses in the 64-bit hash table */
861 	netdev_for_each_mc_addr(ha, dev) {
862 		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
863 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
864 	}
865 
866 	/* Write the hash table to MAC MD table */
867 	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
868 		iow(db, oft++, hash_table[i]);
869 		iow(db, oft++, hash_table[i] >> 8);
870 	}
871 
872 	iow(db, DM9000_RCR, rcr);
873 }
874 
875 static void
876 dm9000_hash_table(struct net_device *dev)
877 {
878 	struct board_info *db = netdev_priv(dev);
879 	unsigned long flags;
880 
881 	spin_lock_irqsave(&db->lock, flags);
882 	dm9000_hash_table_unlocked(dev);
883 	spin_unlock_irqrestore(&db->lock, flags);
884 }
885 
886 static void
887 dm9000_mask_interrupts(struct board_info *db)
888 {
889 	iow(db, DM9000_IMR, IMR_PAR);
890 }
891 
892 static void
893 dm9000_unmask_interrupts(struct board_info *db)
894 {
895 	iow(db, DM9000_IMR, db->imr_all);
896 }
897 
898 /*
899  * Initialize dm9000 board
900  */
901 static void
902 dm9000_init_dm9000(struct net_device *dev)
903 {
904 	struct board_info *db = netdev_priv(dev);
905 	unsigned int imr;
906 	unsigned int ncr;
907 
908 	dm9000_dbg(db, 1, "entering %s\n", __func__);
909 
910 	dm9000_reset(db);
911 	dm9000_mask_interrupts(db);
912 
913 	/* I/O mode */
914 	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */
915 
916 	/* Checksum mode */
917 	if (dev->hw_features & NETIF_F_RXCSUM)
918 		iow(db, DM9000_RCSR,
919 			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
920 
921 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
922 	iow(db, DM9000_GPR, 0);
923 
924 	/* If we are dealing with DM9000B, some extra steps are required: a
925 	 * manual phy reset, and setting init params.
926 	 */
927 	if (db->type == TYPE_DM9000B) {
928 		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
929 		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
930 	}
931 
932 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
933 
934 	/* If WoL is supported, always set NCR_WAKEEN; otherwise the chip
935 	 * discards wake events. The individual wake sources are still
936 	 * masked via DM9000_WCR. */
937 	if (db->wake_supported)
938 		ncr |= NCR_WAKEEN;
939 
940 	iow(db, DM9000_NCR, ncr);
941 
942 	/* Program operating register */
943 	iow(db, DM9000_TCR, 0);	        /* TX Polling clear */
944 	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
945 	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
946 	iow(db, DM9000_SMCR, 0);        /* Special Mode */
947 	/* clear TX status */
948 	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
949 	iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
950 
951 	/* Set address filter table */
952 	dm9000_hash_table_unlocked(dev);
953 
954 	imr = IMR_PAR | IMR_PTM | IMR_PRM;
955 	if (db->type != TYPE_DM9000E)
956 		imr |= IMR_LNKCHNG;
957 
958 	db->imr_all = imr;
959 
960 	/* Init Driver variable */
961 	db->tx_pkt_cnt = 0;
962 	db->queue_pkt_len = 0;
963 	netif_trans_update(dev);
964 }
965 
966 /* Our watchdog timed out. Called by the networking layer */
967 static void dm9000_timeout(struct net_device *dev)
968 {
969 	struct board_info *db = netdev_priv(dev);
970 	u8 reg_save;
971 	unsigned long flags;
972 
973 	/* Save previous register address */
974 	spin_lock_irqsave(&db->lock, flags);
975 	db->in_timeout = 1;
976 	reg_save = readb(db->io_addr);
977 
978 	netif_stop_queue(dev);
979 	dm9000_init_dm9000(dev);
980 	dm9000_unmask_interrupts(db);
981 	/* We can accept TX packets again */
982 	netif_trans_update(dev); /* prevent tx timeout */
983 	netif_wake_queue(dev);
984 
985 	/* Restore previous register address */
986 	writeb(reg_save, db->io_addr);
987 	db->in_timeout = 0;
988 	spin_unlock_irqrestore(&db->lock, flags);
989 }
990 
991 static void dm9000_send_packet(struct net_device *dev,
992 			       int ip_summed,
993 			       u16 pkt_len)
994 {
995 	struct board_info *dm = to_dm9000_board(dev);
996 
997 	/* The DM9000 is not smart enough to leave fragmented packets alone. */
998 	if (dm->ip_summed != ip_summed) {
999 		if (ip_summed == CHECKSUM_NONE)
1000 			iow(dm, DM9000_TCCR, 0);
1001 		else
1002 			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
1003 		dm->ip_summed = ip_summed;
1004 	}
1005 
1006 	/* Set TX length to DM9000 */
1007 	iow(dm, DM9000_TXPLL, pkt_len);
1008 	iow(dm, DM9000_TXPLH, pkt_len >> 8);
1009 
1010 	/* Issue TX polling command */
1011 	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
1012 }
1013 
1014 /*
1015  *  Hardware start transmission.
1016  *  Send a packet to media from the upper layer.
1017  */
1018 static int
1019 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1020 {
1021 	unsigned long flags;
1022 	struct board_info *db = netdev_priv(dev);
1023 
1024 	dm9000_dbg(db, 3, "%s:\n", __func__);
1025 
1026 	if (db->tx_pkt_cnt > 1)
1027 		return NETDEV_TX_BUSY;
1028 
1029 	spin_lock_irqsave(&db->lock, flags);
1030 
1031 	/* Move data to DM9000 TX RAM */
1032 	writeb(DM9000_MWCMD, db->io_addr);
1033 
1034 	(db->outblk)(db->io_data, skb->data, skb->len);
1035 	dev->stats.tx_bytes += skb->len;
1036 
1037 	db->tx_pkt_cnt++;
1038 	/* TX control: First packet immediately send, second packet queue */
1039 	if (db->tx_pkt_cnt == 1) {
1040 		dm9000_send_packet(dev, skb->ip_summed, skb->len);
1041 	} else {
1042 		/* Second packet */
1043 		db->queue_pkt_len = skb->len;
1044 		db->queue_ip_summed = skb->ip_summed;
1045 		netif_stop_queue(dev);
1046 	}
1047 
1048 	spin_unlock_irqrestore(&db->lock, flags);
1049 
1050 	/* free this SKB */
1051 	dev_consume_skb_any(skb);
1052 
1053 	return NETDEV_TX_OK;
1054 }
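/* Note: the chip's internal TX SRAM holds at most two pending frames (per
 * the DM9000 datasheet), hence the tx_pkt_cnt > 1 check above and the queue
 * being stopped once a second frame has been loaded.
 */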
1055 
1056 /*
1057  * DM9000 interrupt handler
1058  * receive the packet to upper layer, free the transmitted packet
1059  */
1060 
1061 static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
1062 {
1063 	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */
1064 
1065 	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1066 		/* One packet sent complete */
1067 		db->tx_pkt_cnt--;
1068 		dev->stats.tx_packets++;
1069 
1070 		if (netif_msg_tx_done(db))
1071 			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1072 
1073 		/* Queue packet check & send */
1074 		if (db->tx_pkt_cnt > 0)
1075 			dm9000_send_packet(dev, db->queue_ip_summed,
1076 					   db->queue_pkt_len);
1077 		netif_wake_queue(dev);
1078 	}
1079 }
1080 
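/* The chip prefixes each frame in RX SRAM with this 4-byte header; RxLen is
 * the frame length including the 4-byte CRC, which dm9000_rx() does not pass
 * up to the stack.
 */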
1081 struct dm9000_rxhdr {
1082 	u8	RxPktReady;
1083 	u8	RxStatus;
1084 	__le16	RxLen;
1085 } __packed;
1086 
1087 /*
1088  *  Received a packet and pass to upper layer
1089  */
1090 static void
1091 dm9000_rx(struct net_device *dev)
1092 {
1093 	struct board_info *db = netdev_priv(dev);
1094 	struct dm9000_rxhdr rxhdr;
1095 	struct sk_buff *skb;
1096 	u8 rxbyte, *rdptr;
1097 	bool GoodPacket;
1098 	int RxLen;
1099 
1100 	/* Check packet ready or not */
1101 	do {
1102 		ior(db, DM9000_MRCMDX);	/* Dummy read */
1103 
1104 		/* Get most updated data */
1105 		rxbyte = readb(db->io_data);
1106 
1107 		/* Status check: this byte must be 0 or 1 */
1108 		if (rxbyte & DM9000_PKT_ERR) {
1109 			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1110 			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
1111 			return;
1112 		}
1113 
1114 		if (!(rxbyte & DM9000_PKT_RDY))
1115 			return;
1116 
1117 		/* A packet is ready; fetch its status and length */
1118 		GoodPacket = true;
1119 		writeb(DM9000_MRCMD, db->io_addr);
1120 
1121 		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1122 
1123 		RxLen = le16_to_cpu(rxhdr.RxLen);
1124 
1125 		if (netif_msg_rx_status(db))
1126 			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1127 				rxhdr.RxStatus, RxLen);
1128 
1129 		/* Packet Status check */
1130 		if (RxLen < 0x40) {
1131 			GoodPacket = false;
1132 			if (netif_msg_rx_err(db))
1133 				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1134 		}
1135 
1136 		if (RxLen > DM9000_PKT_MAX) {
1137 			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1138 		}
1139 
1140 		/* rxhdr.RxStatus is identical to RSR register. */
1141 		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1142 				      RSR_PLE | RSR_RWTO |
1143 				      RSR_LCS | RSR_RF)) {
1144 			GoodPacket = false;
1145 			if (rxhdr.RxStatus & RSR_FOE) {
1146 				if (netif_msg_rx_err(db))
1147 					dev_dbg(db->dev, "fifo error\n");
1148 				dev->stats.rx_fifo_errors++;
1149 			}
1150 			if (rxhdr.RxStatus & RSR_CE) {
1151 				if (netif_msg_rx_err(db))
1152 					dev_dbg(db->dev, "crc error\n");
1153 				dev->stats.rx_crc_errors++;
1154 			}
1155 			if (rxhdr.RxStatus & RSR_RF) {
1156 				if (netif_msg_rx_err(db))
1157 					dev_dbg(db->dev, "length error\n");
1158 				dev->stats.rx_length_errors++;
1159 			}
1160 		}
1161 
1162 		/* Move data from DM9000 */
1163 		if (GoodPacket &&
1164 		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
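			/* Reserve 2 bytes so the IP header is word-aligned,
			 * and account only RxLen - 4 bytes in the skb: the
			 * trailing CRC is read from the FIFO but not handed
			 * to the stack.
			 */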
1165 			skb_reserve(skb, 2);
1166 			rdptr = skb_put(skb, RxLen - 4);
1167 
1168 			/* Read received packet from RX SRAM */
1169 
1170 			(db->inblk)(db->io_data, rdptr, RxLen);
1171 			dev->stats.rx_bytes += RxLen;
1172 
1173 			/* Pass to upper layer */
1174 			skb->protocol = eth_type_trans(skb, dev);
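			/* On DM9000A/B the status byte read earlier (rxbyte)
			 * also carries the hardware checksum result; accept it
			 * only when no failure bit is set for the detected
			 * protocol (bit layout per the DM9000 datasheet, not
			 * re-derived here).
			 */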
1175 			if (dev->features & NETIF_F_RXCSUM) {
1176 				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1177 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1178 				else
1179 					skb_checksum_none_assert(skb);
1180 			}
1181 			netif_rx(skb);
1182 			dev->stats.rx_packets++;
1183 
1184 		} else {
1185 			/* need to dump the packet's data */
1186 
1187 			(db->dumpblk)(db->io_data, RxLen);
1188 		}
1189 	} while (rxbyte & DM9000_PKT_RDY);
1190 }
1191 
1192 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1193 {
1194 	struct net_device *dev = dev_id;
1195 	struct board_info *db = netdev_priv(dev);
1196 	int int_status;
1197 	unsigned long flags;
1198 	u8 reg_save;
1199 
1200 	dm9000_dbg(db, 3, "entering %s\n", __func__);
1201 
1202 	/* A real interrupt coming */
1203 
1204 	/* holders of db->lock must always block IRQs */
1205 	spin_lock_irqsave(&db->lock, flags);
1206 
1207 	/* Save previous register address */
1208 	reg_save = readb(db->io_addr);
1209 
1210 	dm9000_mask_interrupts(db);
1211 	/* Got DM9000 interrupt status */
1212 	int_status = ior(db, DM9000_ISR);	/* Got ISR */
1213 	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */
1214 
1215 	if (netif_msg_intr(db))
1216 		dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1217 
1218 	/* Received the coming packet */
1219 	if (int_status & ISR_PRS)
1220 		dm9000_rx(dev);
1221 
1222 	/* Transmit Interrupt check */
1223 	if (int_status & ISR_PTS)
1224 		dm9000_tx_done(dev, db);
1225 
1226 	if (db->type != TYPE_DM9000E) {
1227 		if (int_status & ISR_LNKCHNG) {
1228 			/* fire a link-change request */
1229 			schedule_delayed_work(&db->phy_poll, 1);
1230 		}
1231 	}
1232 
1233 	dm9000_unmask_interrupts(db);
1234 	/* Restore previous register address */
1235 	writeb(reg_save, db->io_addr);
1236 
1237 	spin_unlock_irqrestore(&db->lock, flags);
1238 
1239 	return IRQ_HANDLED;
1240 }
1241 
1242 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1243 {
1244 	struct net_device *dev = dev_id;
1245 	struct board_info *db = netdev_priv(dev);
1246 	unsigned long flags;
1247 	unsigned nsr, wcr;
1248 
1249 	spin_lock_irqsave(&db->lock, flags);
1250 
1251 	nsr = ior(db, DM9000_NSR);
1252 	wcr = ior(db, DM9000_WCR);
1253 
1254 	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1255 
1256 	if (nsr & NSR_WAKEST) {
1257 		/* clear the latched wake status so it is not reported again */
1258 		iow(db, DM9000_NSR, NSR_WAKEST);
1259 
1260 		if (wcr & WCR_LINKST)
1261 			dev_info(db->dev, "wake by link status change\n");
1262 		if (wcr & WCR_SAMPLEST)
1263 			dev_info(db->dev, "wake by sample packet\n");
1264 		if (wcr & WCR_MAGICST)
1265 			dev_info(db->dev, "wake by magic packet\n");
1266 		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1267 			dev_err(db->dev, "wake signalled with no reason? "
1268 				"NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);
1269 	}
1270 
1271 	spin_unlock_irqrestore(&db->lock, flags);
1272 
1273 	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1274 }
1275 
1276 #ifdef CONFIG_NET_POLL_CONTROLLER
1277 /*
1278  * Used by netconsole
1279  */
1280 static void dm9000_poll_controller(struct net_device *dev)
1281 {
1282 	disable_irq(dev->irq);
1283 	dm9000_interrupt(dev->irq, dev);
1284 	enable_irq(dev->irq);
1285 }
1286 #endif
1287 
1288 /*
1289  *  Open the interface.
1290  *  The interface is opened whenever "ifconfig" activates it.
1291  */
1292 static int
1293 dm9000_open(struct net_device *dev)
1294 {
1295 	struct board_info *db = netdev_priv(dev);
1296 	unsigned int irq_flags = irq_get_trigger_type(dev->irq);
1297 
1298 	if (netif_msg_ifup(db))
1299 		dev_dbg(db->dev, "enabling %s\n", dev->name);
1300 
1301 	/* If there is no IRQ type specified, tell the user that this is a
1302 	 * problem
1303 	 */
1304 	if (irq_flags == IRQF_TRIGGER_NONE)
1305 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1306 
1307 	irq_flags |= IRQF_SHARED;
1308 
1309 	/* Power up the PHY via GPIO0; register 1Fh (GPR) is not cleared by a chip reset */
1310 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
1311 	mdelay(1); /* delay needed by the DM9000B */
1312 
1313 	/* Initialize DM9000 board */
1314 	dm9000_init_dm9000(dev);
1315 
1316 	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
1317 		return -EAGAIN;
1318 	/* Now that we have an interrupt handler hooked up we can unmask
1319 	 * our interrupts
1320 	 */
1321 	dm9000_unmask_interrupts(db);
1322 
1323 	/* Init driver variable */
1324 	db->dbug_cnt = 0;
1325 
1326 	mii_check_media(&db->mii, netif_msg_link(db), 1);
1327 	netif_start_queue(dev);
1328 
1329 	/* Poll initial link status */
1330 	schedule_delayed_work(&db->phy_poll, 1);
1331 
1332 	return 0;
1333 }
1334 
1335 static void
1336 dm9000_shutdown(struct net_device *dev)
1337 {
1338 	struct board_info *db = netdev_priv(dev);
1339 
1340 	/* RESET device */
1341 	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
1342 	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
1343 	dm9000_mask_interrupts(db);
1344 	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
1345 }
1346 
1347 /*
1348  * Stop the interface.
1349  * The interface is stopped when it is brought down.
1350  */
1351 static int
1352 dm9000_stop(struct net_device *ndev)
1353 {
1354 	struct board_info *db = netdev_priv(ndev);
1355 
1356 	if (netif_msg_ifdown(db))
1357 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1358 
1359 	cancel_delayed_work_sync(&db->phy_poll);
1360 
1361 	netif_stop_queue(ndev);
1362 	netif_carrier_off(ndev);
1363 
1364 	/* free interrupt */
1365 	free_irq(ndev->irq, ndev);
1366 
1367 	dm9000_shutdown(ndev);
1368 
1369 	return 0;
1370 }
1371 
1372 static const struct net_device_ops dm9000_netdev_ops = {
1373 	.ndo_open		= dm9000_open,
1374 	.ndo_stop		= dm9000_stop,
1375 	.ndo_start_xmit		= dm9000_start_xmit,
1376 	.ndo_tx_timeout		= dm9000_timeout,
1377 	.ndo_set_rx_mode	= dm9000_hash_table,
1378 	.ndo_do_ioctl		= dm9000_ioctl,
1379 	.ndo_set_features	= dm9000_set_features,
1380 	.ndo_validate_addr	= eth_validate_addr,
1381 	.ndo_set_mac_address	= eth_mac_addr,
1382 #ifdef CONFIG_NET_POLL_CONTROLLER
1383 	.ndo_poll_controller	= dm9000_poll_controller,
1384 #endif
1385 };
1386 
1387 static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1388 {
1389 	struct dm9000_plat_data *pdata;
1390 	struct device_node *np = dev->of_node;
1391 	const void *mac_addr;
1392 
1393 	if (!IS_ENABLED(CONFIG_OF) || !np)
1394 		return ERR_PTR(-ENXIO);
1395 
1396 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1397 	if (!pdata)
1398 		return ERR_PTR(-ENOMEM);
1399 
1400 	if (of_find_property(np, "davicom,ext-phy", NULL))
1401 		pdata->flags |= DM9000_PLATF_EXT_PHY;
1402 	if (of_find_property(np, "davicom,no-eeprom", NULL))
1403 		pdata->flags |= DM9000_PLATF_NO_EEPROM;
1404 
1405 	mac_addr = of_get_mac_address(np);
1406 	if (!IS_ERR(mac_addr))
1407 		ether_addr_copy(pdata->dev_addr, mac_addr);
1408 
1409 	return pdata;
1410 }
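/* Example device tree node (illustration only; the addresses, sizes and
 * interrupt specifier are board specific):
 *
 *	ethernet@18000000 {
 *		compatible = "davicom,dm9000";
 *		reg = <0x18000000 0x2>,		// address port
 *		      <0x18000004 0x2>;		// data port
 *		interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
 *		davicom,no-eeprom;
 *	};
 */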
1411 
1412 /*
1413  * Search DM9000 board, allocate space and register it
1414  */
1415 static int
1416 dm9000_probe(struct platform_device *pdev)
1417 {
1418 	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1419 	struct board_info *db;	/* Point a board information structure */
1420 	struct net_device *ndev;
1421 	struct device *dev = &pdev->dev;
1422 	const unsigned char *mac_src;
1423 	int ret = 0;
1424 	int iosize;
1425 	int i;
1426 	u32 id_val;
1427 	int reset_gpios;
1428 	enum of_gpio_flags flags;
1429 	struct regulator *power;
1430 	bool inv_mac_addr = false;
1431 
1432 	power = devm_regulator_get(dev, "vcc");
1433 	if (IS_ERR(power)) {
1434 		if (PTR_ERR(power) == -EPROBE_DEFER)
1435 			return -EPROBE_DEFER;
1436 		dev_dbg(dev, "no regulator provided\n");
1437 	} else {
1438 		ret = regulator_enable(power);
1439 		if (ret != 0) {
1440 			dev_err(dev,
1441 				"Failed to enable power regulator: %d\n", ret);
1442 			return ret;
1443 		}
1444 		dev_dbg(dev, "regulator enabled\n");
1445 	}
1446 
1447 	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
1448 					      &flags);
1449 	if (gpio_is_valid(reset_gpios)) {
1450 		ret = devm_gpio_request_one(dev, reset_gpios, flags,
1451 					    "dm9000_reset");
1452 		if (ret) {
1453 			dev_err(dev, "failed to request reset gpio %d: %d\n",
1454 				reset_gpios, ret);
1455 			return -ENODEV;
1456 		}
1457 
1458 		/* According to the manual, PWRST# must be held low for at least 1 ms */
1459 		msleep(2);
1460 		gpio_set_value(reset_gpios, 1);
1461 		/* Needs 3ms to read eeprom when PWRST is deasserted */
1462 		msleep(4);
1463 	}
1464 
1465 	if (!pdata) {
1466 		pdata = dm9000_parse_dt(&pdev->dev);
1467 		if (IS_ERR(pdata))
1468 			return PTR_ERR(pdata);
1469 	}
1470 
1471 	/* Init network device */
1472 	ndev = alloc_etherdev(sizeof(struct board_info));
1473 	if (!ndev)
1474 		return -ENOMEM;
1475 
1476 	SET_NETDEV_DEV(ndev, &pdev->dev);
1477 
1478 	dev_dbg(&pdev->dev, "dm9000_probe()\n");
1479 
1480 	/* setup board info structure */
1481 	db = netdev_priv(ndev);
1482 
1483 	db->dev = &pdev->dev;
1484 	db->ndev = ndev;
1485 
1486 	spin_lock_init(&db->lock);
1487 	mutex_init(&db->addr_lock);
1488 
1489 	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1490 
1491 	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1492 	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1493 
1494 	if (!db->addr_res || !db->data_res) {
1495 		dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1496 			db->addr_res, db->data_res);
1497 		ret = -ENOENT;
1498 		goto out;
1499 	}
1500 
1501 	ndev->irq = platform_get_irq(pdev, 0);
1502 	if (ndev->irq < 0) {
1503 		dev_err(db->dev, "interrupt resource unavailable: %d\n",
1504 			ndev->irq);
1505 		ret = ndev->irq;
1506 		goto out;
1507 	}
1508 
1509 	db->irq_wake = platform_get_irq(pdev, 1);
1510 	if (db->irq_wake >= 0) {
1511 		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1512 
1513 		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1514 				  IRQF_SHARED, dev_name(db->dev), ndev);
1515 		if (ret) {
1516 			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1517 		} else {
1518 
1519 			/* test to see if irq is really wakeup capable */
1520 			ret = irq_set_irq_wake(db->irq_wake, 1);
1521 			if (ret) {
1522 				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1523 					db->irq_wake, ret);
1524 				ret = 0;
1525 			} else {
1526 				irq_set_irq_wake(db->irq_wake, 0);
1527 				db->wake_supported = 1;
1528 			}
1529 		}
1530 	}
1531 
1532 	iosize = resource_size(db->addr_res);
1533 	db->addr_req = request_mem_region(db->addr_res->start, iosize,
1534 					  pdev->name);
1535 
1536 	if (db->addr_req == NULL) {
1537 		dev_err(db->dev, "cannot claim address reg area\n");
1538 		ret = -EIO;
1539 		goto out;
1540 	}
1541 
1542 	db->io_addr = ioremap(db->addr_res->start, iosize);
1543 
1544 	if (db->io_addr == NULL) {
1545 		dev_err(db->dev, "failed to ioremap address reg\n");
1546 		ret = -EINVAL;
1547 		goto out;
1548 	}
1549 
1550 	iosize = resource_size(db->data_res);
1551 	db->data_req = request_mem_region(db->data_res->start, iosize,
1552 					  pdev->name);
1553 
1554 	if (db->data_req == NULL) {
1555 		dev_err(db->dev, "cannot claim data reg area\n");
1556 		ret = -EIO;
1557 		goto out;
1558 	}
1559 
1560 	db->io_data = ioremap(db->data_res->start, iosize);
1561 
1562 	if (db->io_data == NULL) {
1563 		dev_err(db->dev, "failed to ioremap data reg\n");
1564 		ret = -EINVAL;
1565 		goto out;
1566 	}
1567 
1568 	/* fill in parameters for net-dev structure */
1569 	ndev->base_addr = (unsigned long)db->io_addr;
1570 
1571 	/* ensure at least we have a default set of IO routines */
1572 	dm9000_set_io(db, iosize);
1573 
1574 	/* check to see if anything is being over-ridden */
1575 	if (pdata != NULL) {
1576 		/* check to see if the driver wants to over-ride the
1577 		 * default IO width */
1578 
1579 		if (pdata->flags & DM9000_PLATF_8BITONLY)
1580 			dm9000_set_io(db, 1);
1581 
1582 		if (pdata->flags & DM9000_PLATF_16BITONLY)
1583 			dm9000_set_io(db, 2);
1584 
1585 		if (pdata->flags & DM9000_PLATF_32BITONLY)
1586 			dm9000_set_io(db, 4);
1587 
1588 		/* check to see if there are any IO routine
1589 		 * over-rides */
1590 
1591 		if (pdata->inblk != NULL)
1592 			db->inblk = pdata->inblk;
1593 
1594 		if (pdata->outblk != NULL)
1595 			db->outblk = pdata->outblk;
1596 
1597 		if (pdata->dumpblk != NULL)
1598 			db->dumpblk = pdata->dumpblk;
1599 
1600 		db->flags = pdata->flags;
1601 	}
1602 
1603 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1604 	db->flags |= DM9000_PLATF_SIMPLE_PHY;
1605 #endif
1606 
1607 	dm9000_reset(db);
1608 
1609 	/* try multiple times, DM9000 sometimes gets the read wrong */
1610 	for (i = 0; i < 8; i++) {
1611 		id_val  = ior(db, DM9000_VIDL);
1612 		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1613 		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1614 		id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1615 
1616 		if (id_val == DM9000_ID)
1617 			break;
1618 		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1619 	}
1620 
1621 	if (id_val != DM9000_ID) {
1622 		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1623 		ret = -ENODEV;
1624 		goto out;
1625 	}
1626 
1627 	/* Identify what type of DM9000 we are working on */
1628 
1629 	id_val = ior(db, DM9000_CHIPR);
1630 	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1631 
1632 	switch (id_val) {
1633 	case CHIPR_DM9000A:
1634 		db->type = TYPE_DM9000A;
1635 		break;
1636 	case CHIPR_DM9000B:
1637 		db->type = TYPE_DM9000B;
1638 		break;
1639 	default:
1640 		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1641 		db->type = TYPE_DM9000E;
1642 	}
1643 
1644 	/* dm9000a/b are capable of hardware checksum offload */
1645 	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1646 		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1647 		ndev->features |= ndev->hw_features;
1648 	}
1649 
1650 	/* from this point we assume that we have found a DM9000 */
1651 
1652 	ndev->netdev_ops	= &dm9000_netdev_ops;
1653 	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
1654 	ndev->ethtool_ops	= &dm9000_ethtool_ops;
1655 
1656 	db->msg_enable       = NETIF_MSG_LINK;
1657 	db->mii.phy_id_mask  = 0x1f;
1658 	db->mii.reg_num_mask = 0x1f;
1659 	db->mii.force_media  = 0;
1660 	db->mii.full_duplex  = 0;
1661 	db->mii.dev	     = ndev;
1662 	db->mii.mdio_read    = dm9000_phy_read;
1663 	db->mii.mdio_write   = dm9000_phy_write;
1664 
1665 	mac_src = "eeprom";
1666 
1667 	/* try reading the node address from the attached EEPROM */
1668 	for (i = 0; i < 6; i += 2)
1669 		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1670 
1671 	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1672 		mac_src = "platform data";
1673 		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1674 	}
1675 
1676 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1677 		/* try reading from mac */
1678 
1679 		mac_src = "chip";
1680 		for (i = 0; i < 6; i++)
1681 			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1682 	}
1683 
1684 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1685 		inv_mac_addr = true;
1686 		eth_hw_addr_random(ndev);
1687 		mac_src = "random";
1688 	}
1689 
1690 
1691 	platform_set_drvdata(pdev, ndev);
1692 	ret = register_netdev(ndev);
1693 
1694 	if (ret == 0) {
1695 		if (inv_mac_addr)
1696 			dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
1697 				 ndev->name);
1698 		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1699 		       ndev->name, dm9000_type_to_char(db->type),
1700 		       db->io_addr, db->io_data, ndev->irq,
1701 		       ndev->dev_addr, mac_src);
1702 	}
1703 	return 0;
1704 
1705 out:
1706 	dev_err(db->dev, "not found (%d).\n", ret);
1707 
1708 	dm9000_release_board(pdev, db);
1709 	free_netdev(ndev);
1710 
1711 	return ret;
1712 }
1713 
1714 static int
1715 dm9000_drv_suspend(struct device *dev)
1716 {
1717 	struct net_device *ndev = dev_get_drvdata(dev);
1718 	struct board_info *db;
1719 
1720 	if (ndev) {
1721 		db = netdev_priv(ndev);
1722 		db->in_suspend = 1;
1723 
1724 		if (!netif_running(ndev))
1725 			return 0;
1726 
1727 		netif_device_detach(ndev);
1728 
1729 		/* only shutdown if not using WoL */
1730 		if (!db->wake_state)
1731 			dm9000_shutdown(ndev);
1732 	}
1733 	return 0;
1734 }
1735 
1736 static int
1737 dm9000_drv_resume(struct device *dev)
1738 {
1739 	struct net_device *ndev = dev_get_drvdata(dev);
1740 	struct board_info *db = netdev_priv(ndev);
1741 
1742 	if (ndev) {
1743 		if (netif_running(ndev)) {
1744 			/* If we were not in wake mode the chip may have been
1745 			 * powered off, so reset it to a known state. */
1746 			if (!db->wake_state) {
1747 				dm9000_init_dm9000(ndev);
1748 				dm9000_unmask_interrupts(db);
1749 			}
1750 
1751 			netif_device_attach(ndev);
1752 		}
1753 
1754 		db->in_suspend = 0;
1755 	}
1756 	return 0;
1757 }
1758 
1759 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1760 	.suspend	= dm9000_drv_suspend,
1761 	.resume		= dm9000_drv_resume,
1762 };
1763 
1764 static int
1765 dm9000_drv_remove(struct platform_device *pdev)
1766 {
1767 	struct net_device *ndev = platform_get_drvdata(pdev);
1768 
1769 	unregister_netdev(ndev);
1770 	dm9000_release_board(pdev, netdev_priv(ndev));
1771 	free_netdev(ndev);		/* free device structure */
1772 
1773 	dev_dbg(&pdev->dev, "released and freed device\n");
1774 	return 0;
1775 }
1776 
1777 #ifdef CONFIG_OF
1778 static const struct of_device_id dm9000_of_matches[] = {
1779 	{ .compatible = "davicom,dm9000", },
1780 	{ /* sentinel */ }
1781 };
1782 MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1783 #endif
1784 
1785 static struct platform_driver dm9000_driver = {
1786 	.driver	= {
1787 		.name    = "dm9000",
1788 		.pm	 = &dm9000_drv_pm_ops,
1789 		.of_match_table = of_match_ptr(dm9000_of_matches),
1790 	},
1791 	.probe   = dm9000_probe,
1792 	.remove  = dm9000_drv_remove,
1793 };
1794 
1795 module_platform_driver(dm9000_driver);
1796 
1797 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1798 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1799 MODULE_LICENSE("GPL");
1800 MODULE_ALIAS("platform:dm9000");
1801