1 /*
2  *      Davicom DM9000 Fast Ethernet driver for Linux.
3  * 	Copyright (C) 1997  Sten Wang
4  *
5  * 	This program is free software; you can redistribute it and/or
6  * 	modify it under the terms of the GNU General Public License
7  * 	as published by the Free Software Foundation; either version 2
8  * 	of the License, or (at your option) any later version.
9  *
10  * 	This program is distributed in the hope that it will be useful,
11  * 	but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * 	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * 	GNU General Public License for more details.
14  *
15  * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16  *
17  * Additional updates, Copyright:
18  *	Ben Dooks <ben@simtec.co.uk>
19  *	Sascha Hauer <s.hauer@pengutronix.de>
20  */
21 
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/interrupt.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/of.h>
32 #include <linux/of_net.h>
33 #include <linux/ethtool.h>
34 #include <linux/dm9000.h>
35 #include <linux/delay.h>
36 #include <linux/platform_device.h>
37 #include <linux/irq.h>
38 #include <linux/slab.h>
39 
40 #include <asm/delay.h>
41 #include <asm/irq.h>
42 #include <asm/io.h>
43 
44 #include "dm9000.h"
45 
46 /* Board/System/Debug information/definition ---------------- */
47 
48 #define DM9000_PHY		0x40	/* PHY address 0x01 */
49 
50 #define CARDNAME	"dm9000"
51 #define DRV_VERSION	"1.31"
52 
53 /*
54  * Transmit timeout, default 5 seconds.
55  */
56 static int watchdog = 5000;
57 module_param(watchdog, int, 0400);
58 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
59 
60 /*
61  * Debug messages level
62  */
63 static int debug;
64 module_param(debug, int, 0644);
65 MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
66 
67 /* DM9000 register address locking.
68  *
69  * The DM9000 uses an address register to control where data written
70  * to the data register goes. This means that the address register
71  * must be preserved over interrupts or similar calls.
72  *
73  * During interrupt and other critical calls, a spinlock is used to
74  * protect the system, but the calls themselves save the address
75  * in the address register in case they are interrupting another
76  * access to the device.
77  *
78  * For general accesses a lock is provided so that calls which are
79  * allowed to sleep are serialised so that the address register does
80  * not need to be saved. This lock also serves to serialise access
81  * to the EEPROM and PHY access registers which are shared between
82  * these two devices.
83  */
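
/* Illustrative sketch (not a separate API) of the save/restore pattern the
 * interrupt-safe accessors below follow; SOME_REG stands for whichever
 * register is actually being accessed:
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);		remember selected register
 *	iow(db, SOME_REG, value);		perform the access
 *	writeb(reg_save, db->io_addr);		restore it for whoever we
 *	spin_unlock_irqrestore(&db->lock, flags);	interrupted
 */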
84 
85 /* The driver supports the original DM9000E, and now the two newer
86  * devices, DM9000A and DM9000B.
87  */
88 
89 enum dm9000_type {
90 	TYPE_DM9000E,	/* original DM9000 */
91 	TYPE_DM9000A,
92 	TYPE_DM9000B
93 };
94 
95 /* Structure/enum declaration ------------------------------- */
96 struct board_info {
97 
98 	void __iomem	*io_addr;	/* Register I/O base address */
99 	void __iomem	*io_data;	/* Data I/O address */
100 	u16		 irq;		/* IRQ */
101 
102 	u16		tx_pkt_cnt;
103 	u16		queue_pkt_len;
104 	u16		queue_start_addr;
105 	u16		queue_ip_summed;
106 	u16		dbug_cnt;
107 	u8		io_mode;		/* 0:word, 2:byte */
108 	u8		phy_addr;
109 	u8		imr_all;
110 
111 	unsigned int	flags;
112 	unsigned int	in_timeout:1;
113 	unsigned int	in_suspend:1;
114 	unsigned int	wake_supported:1;
115 
116 	enum dm9000_type type;
117 
118 	void (*inblk)(void __iomem *port, void *data, int length);
119 	void (*outblk)(void __iomem *port, void *data, int length);
120 	void (*dumpblk)(void __iomem *port, int length);
121 
122 	struct device	*dev;	     /* parent device */
123 
124 	struct resource	*addr_res;   /* resources found */
125 	struct resource *data_res;
126 	struct resource	*addr_req;   /* resources requested */
127 	struct resource *data_req;
128 	struct resource *irq_res;
129 
130 	int		 irq_wake;
131 
132 	struct mutex	 addr_lock;	/* phy and eeprom access lock */
133 
134 	struct delayed_work phy_poll;
135 	struct net_device  *ndev;
136 
137 	spinlock_t	lock;
138 
139 	struct mii_if_info mii;
140 	u32		msg_enable;
141 	u32		wake_state;
142 
143 	int		ip_summed;
144 };
145 
146 /* debug code */
147 
148 #define dm9000_dbg(db, lev, msg...) do {		\
149 	if ((lev) < debug) {				\
150 		dev_dbg(db->dev, msg);			\
151 	}						\
152 } while (0)
153 
154 static inline struct board_info *to_dm9000_board(struct net_device *dev)
155 {
156 	return netdev_priv(dev);
157 }
158 
159 /* DM9000 network board routine ---------------------------- */
160 
161 /*
162  *   Read a byte from I/O port
163  */
164 static u8
165 ior(struct board_info *db, int reg)
166 {
167 	writeb(reg, db->io_addr);
168 	return readb(db->io_data);
169 }
170 
171 /*
172  *   Write a byte to I/O port
173  */
174 
175 static void
176 iow(struct board_info *db, int reg, int value)
177 {
178 	writeb(reg, db->io_addr);
179 	writeb(value, db->io_data);
180 }
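
/* Usage note: ior()/iow() implement the DM9000's indexed access scheme: the
 * register index is first written to the address port (io_addr) and the value
 * is then read from or written to the data port (io_data).  For example,
 * iow(db, DM9000_NCR, 0) selects NCR and writes 0 to it.  Because the
 * currently selected index is chip-wide state, callers rely on the locking
 * scheme described at the top of this file.
 */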
181 
182 static void
183 dm9000_reset(struct board_info *db)
184 {
185 	dev_dbg(db->dev, "resetting device\n");
186 
187 	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
188 	 * The essential point is that we have to do a double reset, and the
189 	 * instruction is to set LBK into MAC internal loopback mode.
190 	 */
191 	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
192 	udelay(100); /* Application note says at least 20 us */
193 	if (ior(db, DM9000_NCR) & 1)
194 		dev_err(db->dev, "dm9000 did not respond to first reset\n");
195 
196 	iow(db, DM9000_NCR, 0);
197 	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
198 	udelay(100);
199 	if (ior(db, DM9000_NCR) & 1)
200 		dev_err(db->dev, "dm9000 did not respond to second reset\n");
201 }
202 
203 /* routines for sending block to chip */
204 
205 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
206 {
207 	iowrite8_rep(reg, data, count);
208 }
209 
210 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
211 {
212 	iowrite16_rep(reg, data, (count+1) >> 1);
213 }
214 
215 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
216 {
217 	iowrite32_rep(reg, data, (count+3) >> 2);
218 }
219 
220 /* input block from chip to memory */
221 
222 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
223 {
224 	ioread8_rep(reg, data, count);
225 }
226 
227 
228 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
229 {
230 	ioread16_rep(reg, data, (count+1) >> 1);
231 }
232 
233 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
234 {
235 	ioread32_rep(reg, data, (count+3) >> 2);
236 }
237 
238 /* dump block from chip to null */
239 
240 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
241 {
242 	int i;
243 	int tmp;
244 
245 	for (i = 0; i < count; i++)
246 		tmp = readb(reg);
247 }
248 
249 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
250 {
251 	int i;
252 	int tmp;
253 
254 	count = (count + 1) >> 1;
255 
256 	for (i = 0; i < count; i++)
257 		tmp = readw(reg);
258 }
259 
260 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
261 {
262 	int i;
263 	int tmp;
264 
265 	count = (count + 3) >> 2;
266 
267 	for (i = 0; i < count; i++)
268 		tmp = readl(reg);
269 }
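
/* The block helpers above transfer whole bus words: the 16/32-bit variants
 * round the byte count up, e.g. (count + 3) >> 2 turns 61 bytes into sixteen
 * 32-bit accesses.  The dumpblk variants read and discard data, which is how
 * the RX path skips a packet it cannot deliver without copying it anywhere.
 */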
270 
271 /*
272  * Sleep, either by using msleep() or, if we are suspending or handling a
273  * timeout, by busy-waiting with mdelay() since we must not schedule.
274  */
275 static void dm9000_msleep(struct board_info *db, unsigned int ms)
276 {
277 	if (db->in_suspend || db->in_timeout)
278 		mdelay(ms);
279 	else
280 		msleep(ms);
281 }
282 
283 /* Read a word from phyxcer */
284 static int
285 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
286 {
287 	struct board_info *db = netdev_priv(dev);
288 	unsigned long flags;
289 	unsigned int reg_save;
290 	int ret;
291 
292 	mutex_lock(&db->addr_lock);
293 
294 	spin_lock_irqsave(&db->lock, flags);
295 
296 	/* Save previous register address */
297 	reg_save = readb(db->io_addr);
298 
299 	/* Fill the phyxcer register into REG_0C */
300 	iow(db, DM9000_EPAR, DM9000_PHY | reg);
301 
302 	/* Issue phyxcer read command */
303 	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
304 
305 	writeb(reg_save, db->io_addr);
306 	spin_unlock_irqrestore(&db->lock, flags);
307 
308 	dm9000_msleep(db, 1);		/* Wait read complete */
309 
310 	spin_lock_irqsave(&db->lock, flags);
311 	reg_save = readb(db->io_addr);
312 
313 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */
314 
315 	/* The read data is kept in REG_0D & REG_0E */
316 	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
317 
318 	/* restore the previous address */
319 	writeb(reg_save, db->io_addr);
320 	spin_unlock_irqrestore(&db->lock, flags);
321 
322 	mutex_unlock(&db->addr_lock);
323 
324 	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
325 	return ret;
326 }
327 
328 /* Write a word to phyxcer */
329 static void
330 dm9000_phy_write(struct net_device *dev,
331 		 int phyaddr_unused, int reg, int value)
332 {
333 	struct board_info *db = netdev_priv(dev);
334 	unsigned long flags;
335 	unsigned long reg_save;
336 
337 	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
338 	if (!db->in_timeout)
339 		mutex_lock(&db->addr_lock);
340 
341 	spin_lock_irqsave(&db->lock, flags);
342 
343 	/* Save previous register address */
344 	reg_save = readb(db->io_addr);
345 
346 	/* Fill the phyxcer register into REG_0C */
347 	iow(db, DM9000_EPAR, DM9000_PHY | reg);
348 
349 	/* Fill the written data into REG_0D & REG_0E */
350 	iow(db, DM9000_EPDRL, value);
351 	iow(db, DM9000_EPDRH, value >> 8);
352 
353 	/* Issue phyxcer write command */
354 	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
355 
356 	writeb(reg_save, db->io_addr);
357 	spin_unlock_irqrestore(&db->lock, flags);
358 
359 	dm9000_msleep(db, 1);		/* Wait write complete */
360 
361 	spin_lock_irqsave(&db->lock, flags);
362 	reg_save = readb(db->io_addr);
363 
364 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */
365 
366 	/* restore the previous address */
367 	writeb(reg_save, db->io_addr);
368 
369 	spin_unlock_irqrestore(&db->lock, flags);
370 	if (!db->in_timeout)
371 		mutex_unlock(&db->addr_lock);
372 }
373 
374 /* dm9000_set_io
375  *
376  * select the specified set of io routines to use with the
377  * device
378  */
379 
380 static void dm9000_set_io(struct board_info *db, int byte_width)
381 {
382 	/* use the size of the data resource to work out what IO
383 	 * routines we want to use
384 	 */
385 
386 	switch (byte_width) {
387 	case 1:
388 		db->dumpblk = dm9000_dumpblk_8bit;
389 		db->outblk  = dm9000_outblk_8bit;
390 		db->inblk   = dm9000_inblk_8bit;
391 		break;
392 
393 
394 	case 3:
395 		dev_dbg(db->dev, "3 byte IO, falling back to 16bit\n");
396 	case 2:
397 		db->dumpblk = dm9000_dumpblk_16bit;
398 		db->outblk  = dm9000_outblk_16bit;
399 		db->inblk   = dm9000_inblk_16bit;
400 		break;
401 
402 	case 4:
403 	default:
404 		db->dumpblk = dm9000_dumpblk_32bit;
405 		db->outblk  = dm9000_outblk_32bit;
406 		db->inblk   = dm9000_inblk_32bit;
407 		break;
408 	}
409 }
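
/* Example: a platform whose data resource is two bytes wide gets
 * dm9000_set_io(db, 2) from the probe path and therefore the 16-bit
 * inblk/outblk/dumpblk routines, unless platform data later overrides the
 * width via DM9000_PLATF_{8,16,32}BITONLY or supplies its own routines.
 */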
410 
411 static void dm9000_schedule_poll(struct board_info *db)
412 {
413 	if (db->type == TYPE_DM9000E)
414 		schedule_delayed_work(&db->phy_poll, HZ * 2);
415 }
416 
417 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
418 {
419 	struct board_info *dm = to_dm9000_board(dev);
420 
421 	if (!netif_running(dev))
422 		return -EINVAL;
423 
424 	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
425 }
426 
427 static unsigned int
428 dm9000_read_locked(struct board_info *db, int reg)
429 {
430 	unsigned long flags;
431 	unsigned int ret;
432 
433 	spin_lock_irqsave(&db->lock, flags);
434 	ret = ior(db, reg);
435 	spin_unlock_irqrestore(&db->lock, flags);
436 
437 	return ret;
438 }
439 
440 static int dm9000_wait_eeprom(struct board_info *db)
441 {
442 	unsigned int status;
443 	int timeout = 8;	/* wait max 8msec */
444 
445 	/* The DM9000 data sheets say we should be able to
446 	 * poll the ERRE bit in EPCR to wait for the EEPROM
447 	 * operation. From testing several chips, this bit
448 	 * does not seem to work.
449 	 *
450 	 * We attempt to use the bit, but fall back to the
451 	 * timeout (which is why we do not return an error
452 	 * on expiry) to say that the EEPROM operation has
453 	 * completed.
454 	 */
455 
456 	while (1) {
457 		status = dm9000_read_locked(db, DM9000_EPCR);
458 
459 		if ((status & EPCR_ERRE) == 0)
460 			break;
461 
462 		msleep(1);
463 
464 		if (timeout-- < 0) {
465 			dev_dbg(db->dev, "timeout waiting EEPROM\n");
466 			break;
467 		}
468 	}
469 
470 	return 0;
471 }
472 
473 /*
474  *  Read a word data from EEPROM
475  */
476 static void
477 dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
478 {
479 	unsigned long flags;
480 
481 	if (db->flags & DM9000_PLATF_NO_EEPROM) {
482 		to[0] = 0xff;
483 		to[1] = 0xff;
484 		return;
485 	}
486 
487 	mutex_lock(&db->addr_lock);
488 
489 	spin_lock_irqsave(&db->lock, flags);
490 
491 	iow(db, DM9000_EPAR, offset);
492 	iow(db, DM9000_EPCR, EPCR_ERPRR);
493 
494 	spin_unlock_irqrestore(&db->lock, flags);
495 
496 	dm9000_wait_eeprom(db);
497 
498 	/* delay for at least 150 us */
499 	msleep(1);
500 
501 	spin_lock_irqsave(&db->lock, flags);
502 
503 	iow(db, DM9000_EPCR, 0x0);
504 
505 	to[0] = ior(db, DM9000_EPDRL);
506 	to[1] = ior(db, DM9000_EPDRH);
507 
508 	spin_unlock_irqrestore(&db->lock, flags);
509 
510 	mutex_unlock(&db->addr_lock);
511 }
512 
513 /*
514  * Write a word data to SROM
515  */
516 static void
517 dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
518 {
519 	unsigned long flags;
520 
521 	if (db->flags & DM9000_PLATF_NO_EEPROM)
522 		return;
523 
524 	mutex_lock(&db->addr_lock);
525 
526 	spin_lock_irqsave(&db->lock, flags);
527 	iow(db, DM9000_EPAR, offset);
528 	iow(db, DM9000_EPDRH, data[1]);
529 	iow(db, DM9000_EPDRL, data[0]);
530 	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
531 	spin_unlock_irqrestore(&db->lock, flags);
532 
533 	dm9000_wait_eeprom(db);
534 
535 	mdelay(1);	/* wait at least 150 us to clear */
536 
537 	spin_lock_irqsave(&db->lock, flags);
538 	iow(db, DM9000_EPCR, 0);
539 	spin_unlock_irqrestore(&db->lock, flags);
540 
541 	mutex_unlock(&db->addr_lock);
542 }
543 
544 /* ethtool ops */
545 
546 static void dm9000_get_drvinfo(struct net_device *dev,
547 			       struct ethtool_drvinfo *info)
548 {
549 	struct board_info *dm = to_dm9000_board(dev);
550 
551 	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
552 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
553 	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
554 		sizeof(info->bus_info));
555 }
556 
557 static u32 dm9000_get_msglevel(struct net_device *dev)
558 {
559 	struct board_info *dm = to_dm9000_board(dev);
560 
561 	return dm->msg_enable;
562 }
563 
564 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
565 {
566 	struct board_info *dm = to_dm9000_board(dev);
567 
568 	dm->msg_enable = value;
569 }
570 
571 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
572 {
573 	struct board_info *dm = to_dm9000_board(dev);
574 
575 	mii_ethtool_gset(&dm->mii, cmd);
576 	return 0;
577 }
578 
579 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
580 {
581 	struct board_info *dm = to_dm9000_board(dev);
582 
583 	return mii_ethtool_sset(&dm->mii, cmd);
584 }
585 
586 static int dm9000_nway_reset(struct net_device *dev)
587 {
588 	struct board_info *dm = to_dm9000_board(dev);
589 	return mii_nway_restart(&dm->mii);
590 }
591 
592 static int dm9000_set_features(struct net_device *dev,
593 	netdev_features_t features)
594 {
595 	struct board_info *dm = to_dm9000_board(dev);
596 	netdev_features_t changed = dev->features ^ features;
597 	unsigned long flags;
598 
599 	if (!(changed & NETIF_F_RXCSUM))
600 		return 0;
601 
602 	spin_lock_irqsave(&dm->lock, flags);
603 	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
604 	spin_unlock_irqrestore(&dm->lock, flags);
605 
606 	return 0;
607 }
608 
609 static u32 dm9000_get_link(struct net_device *dev)
610 {
611 	struct board_info *dm = to_dm9000_board(dev);
612 	u32 ret;
613 
614 	if (dm->flags & DM9000_PLATF_EXT_PHY)
615 		ret = mii_link_ok(&dm->mii);
616 	else
617 		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
618 
619 	return ret;
620 }
621 
622 #define DM_EEPROM_MAGIC		(0x444D394B)
623 
624 static int dm9000_get_eeprom_len(struct net_device *dev)
625 {
626 	return 128;
627 }
628 
629 static int dm9000_get_eeprom(struct net_device *dev,
630 			     struct ethtool_eeprom *ee, u8 *data)
631 {
632 	struct board_info *dm = to_dm9000_board(dev);
633 	int offset = ee->offset;
634 	int len = ee->len;
635 	int i;
636 
637 	/* EEPROM access is aligned to two bytes */
638 
639 	if ((len & 1) != 0 || (offset & 1) != 0)
640 		return -EINVAL;
641 
642 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
643 		return -ENOENT;
644 
645 	ee->magic = DM_EEPROM_MAGIC;
646 
647 	for (i = 0; i < len; i += 2)
648 		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
649 
650 	return 0;
651 }
652 
653 static int dm9000_set_eeprom(struct net_device *dev,
654 			     struct ethtool_eeprom *ee, u8 *data)
655 {
656 	struct board_info *dm = to_dm9000_board(dev);
657 	int offset = ee->offset;
658 	int len = ee->len;
659 	int done;
660 
661 	/* EEPROM access is aligned to two bytes */
662 
663 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
664 		return -ENOENT;
665 
666 	if (ee->magic != DM_EEPROM_MAGIC)
667 		return -EINVAL;
668 
669 	while (len > 0) {
670 		if (len & 1 || offset & 1) {
671 			int which = offset & 1;
672 			u8 tmp[2];
673 
674 			dm9000_read_eeprom(dm, offset / 2, tmp);
675 			tmp[which] = *data;
676 			dm9000_write_eeprom(dm, offset / 2, tmp);
677 
678 			done = 1;
679 		} else {
680 			dm9000_write_eeprom(dm, offset / 2, data);
681 			done = 2;
682 		}
683 
684 		data += done;
685 		offset += done;
686 		len -= done;
687 	}
688 
689 	return 0;
690 }
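
/* Worked example for the read-modify-write above: writing a single byte at
 * EEPROM byte offset 3 reads word 1 (bytes 2-3), replaces tmp[1] with the new
 * byte, writes the word back and advances by one byte; aligned, even-length
 * runs are written a full word at a time instead.
 */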
691 
692 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
693 {
694 	struct board_info *dm = to_dm9000_board(dev);
695 
696 	memset(w, 0, sizeof(struct ethtool_wolinfo));
697 
698 	/* note, we could probably support wake-phy too */
699 	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
700 	w->wolopts = dm->wake_state;
701 }
702 
703 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
704 {
705 	struct board_info *dm = to_dm9000_board(dev);
706 	unsigned long flags;
707 	u32 opts = w->wolopts;
708 	u32 wcr = 0;
709 
710 	if (!dm->wake_supported)
711 		return -EOPNOTSUPP;
712 
713 	if (opts & ~WAKE_MAGIC)
714 		return -EINVAL;
715 
716 	if (opts & WAKE_MAGIC)
717 		wcr |= WCR_MAGICEN;
718 
719 	mutex_lock(&dm->addr_lock);
720 
721 	spin_lock_irqsave(&dm->lock, flags);
722 	iow(dm, DM9000_WCR, wcr);
723 	spin_unlock_irqrestore(&dm->lock, flags);
724 
725 	mutex_unlock(&dm->addr_lock);
726 
727 	if (dm->wake_state != opts) {
728 		/* change in wol state, update IRQ state */
729 
730 		if (!dm->wake_state)
731 			irq_set_irq_wake(dm->irq_wake, 1);
732 		else if (dm->wake_state && !opts)
733 			irq_set_irq_wake(dm->irq_wake, 0);
734 	}
735 
736 	dm->wake_state = opts;
737 	return 0;
738 }
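
/* Note: the WCR write above only arms the chip's magic-packet detection; the
 * wake event itself is signalled on the separate wakeup interrupt
 * (db->irq_wake) requested at probe time, and irq_set_irq_wake() keeps that
 * line enabled as a wakeup source while any WoL option is active.
 */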
739 
740 static const struct ethtool_ops dm9000_ethtool_ops = {
741 	.get_drvinfo		= dm9000_get_drvinfo,
742 	.get_settings		= dm9000_get_settings,
743 	.set_settings		= dm9000_set_settings,
744 	.get_msglevel		= dm9000_get_msglevel,
745 	.set_msglevel		= dm9000_set_msglevel,
746 	.nway_reset		= dm9000_nway_reset,
747 	.get_link		= dm9000_get_link,
748 	.get_wol		= dm9000_get_wol,
749 	.set_wol		= dm9000_set_wol,
750 	.get_eeprom_len		= dm9000_get_eeprom_len,
751 	.get_eeprom		= dm9000_get_eeprom,
752 	.set_eeprom		= dm9000_set_eeprom,
753 };
754 
755 static void dm9000_show_carrier(struct board_info *db,
756 				unsigned carrier, unsigned nsr)
757 {
758 	int lpa;
759 	struct net_device *ndev = db->ndev;
760 	struct mii_if_info *mii = &db->mii;
761 	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
762 
763 	if (carrier) {
764 		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
765 		dev_info(db->dev,
766 			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
767 			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
768 			 (ncr & NCR_FDX) ? "full" : "half", lpa);
769 	} else {
770 		dev_info(db->dev, "%s: link down\n", ndev->name);
771 	}
772 }
773 
774 static void
775 dm9000_poll_work(struct work_struct *w)
776 {
777 	struct delayed_work *dw = to_delayed_work(w);
778 	struct board_info *db = container_of(dw, struct board_info, phy_poll);
779 	struct net_device *ndev = db->ndev;
780 
781 	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
782 	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
783 		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
784 		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
785 		unsigned new_carrier;
786 
787 		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
788 
789 		if (old_carrier != new_carrier) {
790 			if (netif_msg_link(db))
791 				dm9000_show_carrier(db, new_carrier, nsr);
792 
793 			if (!new_carrier)
794 				netif_carrier_off(ndev);
795 			else
796 				netif_carrier_on(ndev);
797 		}
798 	} else
799 		mii_check_media(&db->mii, netif_msg_link(db), 0);
800 
801 	if (netif_running(ndev))
802 		dm9000_schedule_poll(db);
803 }
804 
805 /* dm9000_release_board
806  *
807  * release a board, and any mapped resources
808  */
809 
810 static void
811 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
812 {
813 	/* unmap our resources */
814 
815 	iounmap(db->io_addr);
816 	iounmap(db->io_data);
817 
818 	/* release the resources */
819 
820 	if (db->data_req)
821 		release_resource(db->data_req);
822 	kfree(db->data_req);
823 
824 	if (db->addr_req)
825 		release_resource(db->addr_req);
826 	kfree(db->addr_req);
827 }
828 
829 static unsigned char dm9000_type_to_char(enum dm9000_type type)
830 {
831 	switch (type) {
832 	case TYPE_DM9000E: return 'e';
833 	case TYPE_DM9000A: return 'a';
834 	case TYPE_DM9000B: return 'b';
835 	}
836 
837 	return '?';
838 }
839 
840 /*
841  *  Set DM9000 multicast address
842  */
843 static void
844 dm9000_hash_table_unlocked(struct net_device *dev)
845 {
846 	struct board_info *db = netdev_priv(dev);
847 	struct netdev_hw_addr *ha;
848 	int i, oft;
849 	u32 hash_val;
850 	u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
851 	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
852 
853 	dm9000_dbg(db, 1, "entering %s\n", __func__);
854 
855 	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
856 		iow(db, oft, dev->dev_addr[i]);
857 
858 	if (dev->flags & IFF_PROMISC)
859 		rcr |= RCR_PRMSC;
860 
861 	if (dev->flags & IFF_ALLMULTI)
862 		rcr |= RCR_ALL;
863 
864 	/* the multicast address in Hash Table : 64 bits */
865 	netdev_for_each_mc_addr(ha, dev) {
866 		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
867 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
868 	}
869 
870 	/* Write the hash table to MAC MD table */
871 	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
872 		iow(db, oft++, hash_table[i]);
873 		iow(db, oft++, hash_table[i] >> 8);
874 	}
875 
876 	iow(db, DM9000_RCR, rcr);
877 }
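
/* Illustration of the hash above: the low six bits of the little-endian CRC
 * of the 6-byte multicast address select one of 64 bits in hash_table[].  A
 * hypothetical address hashing to 0x23, for instance, sets bit 3 of
 * hash_table[2].  Bit 63 is pre-set so broadcast frames are always accepted.
 */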
878 
879 static void
880 dm9000_hash_table(struct net_device *dev)
881 {
882 	struct board_info *db = netdev_priv(dev);
883 	unsigned long flags;
884 
885 	spin_lock_irqsave(&db->lock, flags);
886 	dm9000_hash_table_unlocked(dev);
887 	spin_unlock_irqrestore(&db->lock, flags);
888 }
889 
890 static void
891 dm9000_mask_interrupts(struct board_info *db)
892 {
893 	iow(db, DM9000_IMR, IMR_PAR);
894 }
895 
896 static void
897 dm9000_unmask_interrupts(struct board_info *db)
898 {
899 	iow(db, DM9000_IMR, db->imr_all);
900 }
901 
902 /*
903  * Initialize dm9000 board
904  */
905 static void
906 dm9000_init_dm9000(struct net_device *dev)
907 {
908 	struct board_info *db = netdev_priv(dev);
909 	unsigned int imr;
910 	unsigned int ncr;
911 
912 	dm9000_dbg(db, 1, "entering %s\n", __func__);
913 
914 	dm9000_reset(db);
915 	dm9000_mask_interrupts(db);
916 
917 	/* I/O mode */
918 	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */
919 
920 	/* Checksum mode */
921 	if (dev->hw_features & NETIF_F_RXCSUM)
922 		iow(db, DM9000_RCSR,
923 			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
924 
925 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
926 	iow(db, DM9000_GPR, 0);
927 
928 	/* If we are dealing with DM9000B, some extra steps are required: a
929 	 * manual phy reset, and setting init params.
930 	 */
931 	if (db->type == TYPE_DM9000B) {
932 		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
933 		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
934 	}
935 
936 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
937 
938 	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
939 	 * up dumping the wake events if we disable this. There is already
940 	 * a wake-mask in DM9000_WCR */
941 	if (db->wake_supported)
942 		ncr |= NCR_WAKEEN;
943 
944 	iow(db, DM9000_NCR, ncr);
945 
946 	/* Program operating register */
947 	iow(db, DM9000_TCR, 0);	        /* TX Polling clear */
948 	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
949 	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
950 	iow(db, DM9000_SMCR, 0);        /* Special Mode */
951 	/* clear TX status */
952 	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
953 	iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
954 
955 	/* Set address filter table */
956 	dm9000_hash_table_unlocked(dev);
957 
958 	imr = IMR_PAR | IMR_PTM | IMR_PRM;
959 	if (db->type != TYPE_DM9000E)
960 		imr |= IMR_LNKCHNG;
961 
962 	db->imr_all = imr;
963 
964 	/* Init Driver variable */
965 	db->tx_pkt_cnt = 0;
966 	db->queue_pkt_len = 0;
967 	dev->trans_start = jiffies;
968 }
969 
970 /* Our watchdog timed out. Called by the networking layer */
971 static void dm9000_timeout(struct net_device *dev)
972 {
973 	struct board_info *db = netdev_priv(dev);
974 	u8 reg_save;
975 	unsigned long flags;
976 
977 	/* Save previous register address */
978 	spin_lock_irqsave(&db->lock, flags);
979 	db->in_timeout = 1;
980 	reg_save = readb(db->io_addr);
981 
982 	netif_stop_queue(dev);
983 	dm9000_init_dm9000(dev);
984 	dm9000_unmask_interrupts(db);
985 	/* We can accept TX packets again */
986 	dev->trans_start = jiffies; /* prevent tx timeout */
987 	netif_wake_queue(dev);
988 
989 	/* Restore previous register address */
990 	writeb(reg_save, db->io_addr);
991 	db->in_timeout = 0;
992 	spin_unlock_irqrestore(&db->lock, flags);
993 }
994 
995 static void dm9000_send_packet(struct net_device *dev,
996 			       int ip_summed,
997 			       u16 pkt_len)
998 {
999 	struct board_info *dm = to_dm9000_board(dev);
1000 
1001 	/* The DM9000 is not smart enough to leave fragmented packets alone. */
1002 	if (dm->ip_summed != ip_summed) {
1003 		if (ip_summed == CHECKSUM_NONE)
1004 			iow(dm, DM9000_TCCR, 0);
1005 		else
1006 			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
1007 		dm->ip_summed = ip_summed;
1008 	}
1009 
1010 	/* Set TX length to DM9000 */
1011 	iow(dm, DM9000_TXPLL, pkt_len);
1012 	iow(dm, DM9000_TXPLH, pkt_len >> 8);
1013 
1014 	/* Issue TX polling command */
1015 	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
1016 }
1017 
1018 /*
1019  *  Hardware start transmission.
1020  *  Send a packet to media from the upper layer.
1021  */
1022 static int
1023 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1024 {
1025 	unsigned long flags;
1026 	struct board_info *db = netdev_priv(dev);
1027 
1028 	dm9000_dbg(db, 3, "%s:\n", __func__);
1029 
1030 	if (db->tx_pkt_cnt > 1)
1031 		return NETDEV_TX_BUSY;
1032 
1033 	spin_lock_irqsave(&db->lock, flags);
1034 
1035 	/* Move data to DM9000 TX RAM */
1036 	writeb(DM9000_MWCMD, db->io_addr);
1037 
1038 	(db->outblk)(db->io_data, skb->data, skb->len);
1039 	dev->stats.tx_bytes += skb->len;
1040 
1041 	db->tx_pkt_cnt++;
1042 	/* TX control: First packet immediately send, second packet queue */
1043 	if (db->tx_pkt_cnt == 1) {
1044 		dm9000_send_packet(dev, skb->ip_summed, skb->len);
1045 	} else {
1046 		/* Second packet */
1047 		db->queue_pkt_len = skb->len;
1048 		db->queue_ip_summed = skb->ip_summed;
1049 		netif_stop_queue(dev);
1050 	}
1051 
1052 	spin_unlock_irqrestore(&db->lock, flags);
1053 
1054 	/* free this SKB */
1055 	dev_consume_skb_any(skb);
1056 
1057 	return NETDEV_TX_OK;
1058 }
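
/* The transmit path above relies on the DM9000's two TX packet buffers: the
 * first packet is loaded and sent immediately, a second may be loaded behind
 * it (its length and checksum mode saved in queue_pkt_len/queue_ip_summed),
 * and the queue is stopped until dm9000_tx_done() below launches the queued
 * packet and wakes the queue again.
 */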
1059 
1060 /*
1061  * DM9000 interrupt handler
1062  * receive the packet to upper layer, free the transmitted packet
1063  */
1064 
1065 static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
1066 {
1067 	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */
1068 
1069 	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1070 		/* One packet sent complete */
1071 		db->tx_pkt_cnt--;
1072 		dev->stats.tx_packets++;
1073 
1074 		if (netif_msg_tx_done(db))
1075 			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1076 
1077 		/* Queue packet check & send */
1078 		if (db->tx_pkt_cnt > 0)
1079 			dm9000_send_packet(dev, db->queue_ip_summed,
1080 					   db->queue_pkt_len);
1081 		netif_wake_queue(dev);
1082 	}
1083 }
1084 
1085 struct dm9000_rxhdr {
1086 	u8	RxPktReady;
1087 	u8	RxStatus;
1088 	__le16	RxLen;
1089 } __packed;
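
/* Each received frame in the DM9000's RX SRAM is preceded by this 4-byte
 * header: a ready marker, a status byte mirroring the RSR register and the
 * little-endian frame length.  The length reported by the chip includes the
 * 4-byte CRC, which is why dm9000_rx() below reads RxLen bytes out of the
 * chip but only accounts RxLen - 4 bytes of skb data.
 */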
1090 
1091 /*
1092  *  Received a packet and pass to upper layer
1093  */
1094 static void
1095 dm9000_rx(struct net_device *dev)
1096 {
1097 	struct board_info *db = netdev_priv(dev);
1098 	struct dm9000_rxhdr rxhdr;
1099 	struct sk_buff *skb;
1100 	u8 rxbyte, *rdptr;
1101 	bool GoodPacket;
1102 	int RxLen;
1103 
1104 	/* Check packet ready or not */
1105 	do {
1106 		ior(db, DM9000_MRCMDX);	/* Dummy read */
1107 
1108 		/* Get most updated data */
1109 		rxbyte = readb(db->io_data);
1110 
1111 		/* Status check: this byte must be 0 or 1 */
1112 		if (rxbyte & DM9000_PKT_ERR) {
1113 			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1114 			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
1115 			return;
1116 		}
1117 
1118 		if (!(rxbyte & DM9000_PKT_RDY))
1119 			return;
1120 
1121 		/* A packet is ready now; get its status and length */
1122 		GoodPacket = true;
1123 		writeb(DM9000_MRCMD, db->io_addr);
1124 
1125 		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1126 
1127 		RxLen = le16_to_cpu(rxhdr.RxLen);
1128 
1129 		if (netif_msg_rx_status(db))
1130 			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1131 				rxhdr.RxStatus, RxLen);
1132 
1133 		/* Packet Status check */
1134 		if (RxLen < 0x40) {
1135 			GoodPacket = false;
1136 			if (netif_msg_rx_err(db))
1137 				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1138 		}
1139 
1140 		if (RxLen > DM9000_PKT_MAX) {
1141 			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1142 		}
1143 
1144 		/* rxhdr.RxStatus is identical to RSR register. */
1145 		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1146 				      RSR_PLE | RSR_RWTO |
1147 				      RSR_LCS | RSR_RF)) {
1148 			GoodPacket = false;
1149 			if (rxhdr.RxStatus & RSR_FOE) {
1150 				if (netif_msg_rx_err(db))
1151 					dev_dbg(db->dev, "fifo error\n");
1152 				dev->stats.rx_fifo_errors++;
1153 			}
1154 			if (rxhdr.RxStatus & RSR_CE) {
1155 				if (netif_msg_rx_err(db))
1156 					dev_dbg(db->dev, "crc error\n");
1157 				dev->stats.rx_crc_errors++;
1158 			}
1159 			if (rxhdr.RxStatus & RSR_RF) {
1160 				if (netif_msg_rx_err(db))
1161 					dev_dbg(db->dev, "length error\n");
1162 				dev->stats.rx_length_errors++;
1163 			}
1164 		}
1165 
1166 		/* Move data from DM9000 */
1167 		if (GoodPacket &&
1168 		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
1169 			skb_reserve(skb, 2);
1170 			rdptr = (u8 *) skb_put(skb, RxLen - 4);
1171 
1172 			/* Read received packet from RX SRAM */
1173 
1174 			(db->inblk)(db->io_data, rdptr, RxLen);
1175 			dev->stats.rx_bytes += RxLen;
1176 
1177 			/* Pass to upper layer */
1178 			skb->protocol = eth_type_trans(skb, dev);
1179 			if (dev->features & NETIF_F_RXCSUM) {
1180 				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1181 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1182 				else
1183 					skb_checksum_none_assert(skb);
1184 			}
1185 			netif_rx(skb);
1186 			dev->stats.rx_packets++;
1187 
1188 		} else {
1189 			/* need to dump the packet's data */
1190 
1191 			(db->dumpblk)(db->io_data, RxLen);
1192 		}
1193 	} while (rxbyte & DM9000_PKT_RDY);
1194 }
1195 
1196 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1197 {
1198 	struct net_device *dev = dev_id;
1199 	struct board_info *db = netdev_priv(dev);
1200 	int int_status;
1201 	unsigned long flags;
1202 	u8 reg_save;
1203 
1204 	dm9000_dbg(db, 3, "entering %s\n", __func__);
1205 
1206 	/* A real interrupt has arrived */
1207 
1208 	/* holders of db->lock must always block IRQs */
1209 	spin_lock_irqsave(&db->lock, flags);
1210 
1211 	/* Save previous register address */
1212 	reg_save = readb(db->io_addr);
1213 
1214 	dm9000_mask_interrupts(db);
1215 	/* Got DM9000 interrupt status */
1216 	int_status = ior(db, DM9000_ISR);	/* Got ISR */
1217 	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */
1218 
1219 	if (netif_msg_intr(db))
1220 		dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1221 
1222 	/* Receive any pending packets */
1223 	if (int_status & ISR_PRS)
1224 		dm9000_rx(dev);
1225 
1226 	/* Transmit Interrupt check */
1227 	if (int_status & ISR_PTS)
1228 		dm9000_tx_done(dev, db);
1229 
1230 	if (db->type != TYPE_DM9000E) {
1231 		if (int_status & ISR_LNKCHNG) {
1232 			/* fire a link-change request */
1233 			schedule_delayed_work(&db->phy_poll, 1);
1234 		}
1235 	}
1236 
1237 	dm9000_unmask_interrupts(db);
1238 	/* Restore previous register address */
1239 	writeb(reg_save, db->io_addr);
1240 
1241 	spin_unlock_irqrestore(&db->lock, flags);
1242 
1243 	return IRQ_HANDLED;
1244 }
1245 
1246 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1247 {
1248 	struct net_device *dev = dev_id;
1249 	struct board_info *db = netdev_priv(dev);
1250 	unsigned long flags;
1251 	unsigned nsr, wcr;
1252 
1253 	spin_lock_irqsave(&db->lock, flags);
1254 
1255 	nsr = ior(db, DM9000_NSR);
1256 	wcr = ior(db, DM9000_WCR);
1257 
1258 	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1259 
1260 	if (nsr & NSR_WAKEST) {
1261 		/* clear the wake status so it is not signalled again */
1262 		iow(db, DM9000_NSR, NSR_WAKEST);
1263 
1264 		if (wcr & WCR_LINKST)
1265 			dev_info(db->dev, "wake by link status change\n");
1266 		if (wcr & WCR_SAMPLEST)
1267 			dev_info(db->dev, "wake by sample packet\n");
1268 		if (wcr & WCR_MAGICST)
1269 			dev_info(db->dev, "wake by magic packet\n");
1270 		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1271 			dev_err(db->dev, "wake signalled with no reason? "
1272 				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1273 	}
1274 
1275 	spin_unlock_irqrestore(&db->lock, flags);
1276 
1277 	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1278 }
1279 
1280 #ifdef CONFIG_NET_POLL_CONTROLLER
1281 /*
1282  * Used by netconsole
1283  */
1284 static void dm9000_poll_controller(struct net_device *dev)
1285 {
1286 	disable_irq(dev->irq);
1287 	dm9000_interrupt(dev->irq, dev);
1288 	enable_irq(dev->irq);
1289 }
1290 #endif
1291 
1292 /*
1293  *  Open the interface.
1294  *  The interface is opened whenever "ifconfig" activates it.
1295  */
1296 static int
1297 dm9000_open(struct net_device *dev)
1298 {
1299 	struct board_info *db = netdev_priv(dev);
1300 	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1301 
1302 	if (netif_msg_ifup(db))
1303 		dev_dbg(db->dev, "enabling %s\n", dev->name);
1304 
1305 	/* If there is no IRQ type specified, default to something that
1306 	 * may work, and tell the user that this is a problem */
1307 
1308 	if (irqflags == IRQF_TRIGGER_NONE)
1309 		irqflags = irq_get_trigger_type(dev->irq);
1310 
1311 	if (irqflags == IRQF_TRIGGER_NONE)
1312 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1313 
1314 	irqflags |= IRQF_SHARED;
1315 
1316 	/* Pre-activate the PHY via GPIO0; REG_1F is not set by the chip reset */
1317 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
1318 	mdelay(1); /* delay needed by DM9000B */
1319 
1320 	/* Initialize DM9000 board */
1321 	dm9000_init_dm9000(dev);
1322 
1323 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1324 		return -EAGAIN;
1325 	/* Now that we have an interrupt handler hooked up we can unmask
1326 	 * our interrupts
1327 	 */
1328 	dm9000_unmask_interrupts(db);
1329 
1330 	/* Init driver variable */
1331 	db->dbug_cnt = 0;
1332 
1333 	mii_check_media(&db->mii, netif_msg_link(db), 1);
1334 	netif_start_queue(dev);
1335 
1336 	/* Poll initial link status */
1337 	schedule_delayed_work(&db->phy_poll, 1);
1338 
1339 	return 0;
1340 }
1341 
1342 static void
1343 dm9000_shutdown(struct net_device *dev)
1344 {
1345 	struct board_info *db = netdev_priv(dev);
1346 
1347 	/* RESET device */
1348 	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
1349 	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
1350 	dm9000_mask_interrupts(db);
1351 	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
1352 }
1353 
1354 /*
1355  * Stop the interface.
1356  * The interface is stopped when it is brought down.
1357  */
1358 static int
1359 dm9000_stop(struct net_device *ndev)
1360 {
1361 	struct board_info *db = netdev_priv(ndev);
1362 
1363 	if (netif_msg_ifdown(db))
1364 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1365 
1366 	cancel_delayed_work_sync(&db->phy_poll);
1367 
1368 	netif_stop_queue(ndev);
1369 	netif_carrier_off(ndev);
1370 
1371 	/* free interrupt */
1372 	free_irq(ndev->irq, ndev);
1373 
1374 	dm9000_shutdown(ndev);
1375 
1376 	return 0;
1377 }
1378 
1379 static const struct net_device_ops dm9000_netdev_ops = {
1380 	.ndo_open		= dm9000_open,
1381 	.ndo_stop		= dm9000_stop,
1382 	.ndo_start_xmit		= dm9000_start_xmit,
1383 	.ndo_tx_timeout		= dm9000_timeout,
1384 	.ndo_set_rx_mode	= dm9000_hash_table,
1385 	.ndo_do_ioctl		= dm9000_ioctl,
1386 	.ndo_change_mtu		= eth_change_mtu,
1387 	.ndo_set_features	= dm9000_set_features,
1388 	.ndo_validate_addr	= eth_validate_addr,
1389 	.ndo_set_mac_address	= eth_mac_addr,
1390 #ifdef CONFIG_NET_POLL_CONTROLLER
1391 	.ndo_poll_controller	= dm9000_poll_controller,
1392 #endif
1393 };
1394 
1395 static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1396 {
1397 	struct dm9000_plat_data *pdata;
1398 	struct device_node *np = dev->of_node;
1399 	const void *mac_addr;
1400 
1401 	if (!IS_ENABLED(CONFIG_OF) || !np)
1402 		return NULL;
1403 
1404 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1405 	if (!pdata)
1406 		return ERR_PTR(-ENOMEM);
1407 
1408 	if (of_find_property(np, "davicom,ext-phy", NULL))
1409 		pdata->flags |= DM9000_PLATF_EXT_PHY;
1410 	if (of_find_property(np, "davicom,no-eeprom", NULL))
1411 		pdata->flags |= DM9000_PLATF_NO_EEPROM;
1412 
1413 	mac_addr = of_get_mac_address(np);
1414 	if (mac_addr)
1415 		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
1416 
1417 	return pdata;
1418 }
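
/* Example of a device tree node this parses (addresses and MAC are
 * hypothetical, for illustration only):
 *
 *	ethernet@18000000 {
 *		compatible = "davicom,dm9000";
 *		reg = <0x18000000 0x2>, <0x18000004 0x2>;
 *		interrupts = <...>;
 *		davicom,no-eeprom;
 *		local-mac-address = [00 00 de ad be ef];
 *	};
 */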
1419 
1420 /*
1421  * Search DM9000 board, allocate space and register it
1422  */
1423 static int
1424 dm9000_probe(struct platform_device *pdev)
1425 {
1426 	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1427 	struct board_info *db;	/* Points to the board information structure */
1428 	struct net_device *ndev;
1429 	const unsigned char *mac_src;
1430 	int ret = 0;
1431 	int iosize;
1432 	int i;
1433 	u32 id_val;
1434 
1435 	if (!pdata) {
1436 		pdata = dm9000_parse_dt(&pdev->dev);
1437 		if (IS_ERR(pdata))
1438 			return PTR_ERR(pdata);
1439 	}
1440 
1441 	/* Init network device */
1442 	ndev = alloc_etherdev(sizeof(struct board_info));
1443 	if (!ndev)
1444 		return -ENOMEM;
1445 
1446 	SET_NETDEV_DEV(ndev, &pdev->dev);
1447 
1448 	dev_dbg(&pdev->dev, "dm9000_probe()\n");
1449 
1450 	/* setup board info structure */
1451 	db = netdev_priv(ndev);
1452 
1453 	db->dev = &pdev->dev;
1454 	db->ndev = ndev;
1455 
1456 	spin_lock_init(&db->lock);
1457 	mutex_init(&db->addr_lock);
1458 
1459 	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1460 
1461 	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1462 	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1463 	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1464 
1465 	if (db->addr_res == NULL || db->data_res == NULL ||
1466 	    db->irq_res == NULL) {
1467 		dev_err(db->dev, "insufficient resources\n");
1468 		ret = -ENOENT;
1469 		goto out;
1470 	}
1471 
1472 	db->irq_wake = platform_get_irq(pdev, 1);
1473 	if (db->irq_wake >= 0) {
1474 		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1475 
1476 		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1477 				  IRQF_SHARED, dev_name(db->dev), ndev);
1478 		if (ret) {
1479 			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1480 		} else {
1481 
1482 			/* test to see if irq is really wakeup capable */
1483 			ret = irq_set_irq_wake(db->irq_wake, 1);
1484 			if (ret) {
1485 				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1486 					db->irq_wake, ret);
1487 				ret = 0;
1488 			} else {
1489 				irq_set_irq_wake(db->irq_wake, 0);
1490 				db->wake_supported = 1;
1491 			}
1492 		}
1493 	}
1494 
1495 	iosize = resource_size(db->addr_res);
1496 	db->addr_req = request_mem_region(db->addr_res->start, iosize,
1497 					  pdev->name);
1498 
1499 	if (db->addr_req == NULL) {
1500 		dev_err(db->dev, "cannot claim address reg area\n");
1501 		ret = -EIO;
1502 		goto out;
1503 	}
1504 
1505 	db->io_addr = ioremap(db->addr_res->start, iosize);
1506 
1507 	if (db->io_addr == NULL) {
1508 		dev_err(db->dev, "failed to ioremap address reg\n");
1509 		ret = -EINVAL;
1510 		goto out;
1511 	}
1512 
1513 	iosize = resource_size(db->data_res);
1514 	db->data_req = request_mem_region(db->data_res->start, iosize,
1515 					  pdev->name);
1516 
1517 	if (db->data_req == NULL) {
1518 		dev_err(db->dev, "cannot claim data reg area\n");
1519 		ret = -EIO;
1520 		goto out;
1521 	}
1522 
1523 	db->io_data = ioremap(db->data_res->start, iosize);
1524 
1525 	if (db->io_data == NULL) {
1526 		dev_err(db->dev, "failed to ioremap data reg\n");
1527 		ret = -EINVAL;
1528 		goto out;
1529 	}
1530 
1531 	/* fill in parameters for net-dev structure */
1532 	ndev->base_addr = (unsigned long)db->io_addr;
1533 	ndev->irq	= db->irq_res->start;
1534 
1535 	/* ensure at least we have a default set of IO routines */
1536 	dm9000_set_io(db, iosize);
1537 
1538 	/* check to see if anything is being over-ridden */
1539 	if (pdata != NULL) {
1540 		/* check to see if the driver wants to over-ride the
1541 		 * default IO width */
1542 
1543 		if (pdata->flags & DM9000_PLATF_8BITONLY)
1544 			dm9000_set_io(db, 1);
1545 
1546 		if (pdata->flags & DM9000_PLATF_16BITONLY)
1547 			dm9000_set_io(db, 2);
1548 
1549 		if (pdata->flags & DM9000_PLATF_32BITONLY)
1550 			dm9000_set_io(db, 4);
1551 
1552 		/* check to see if there are any IO routine
1553 		 * over-rides */
1554 
1555 		if (pdata->inblk != NULL)
1556 			db->inblk = pdata->inblk;
1557 
1558 		if (pdata->outblk != NULL)
1559 			db->outblk = pdata->outblk;
1560 
1561 		if (pdata->dumpblk != NULL)
1562 			db->dumpblk = pdata->dumpblk;
1563 
1564 		db->flags = pdata->flags;
1565 	}
1566 
1567 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1568 	db->flags |= DM9000_PLATF_SIMPLE_PHY;
1569 #endif
1570 
1571 	dm9000_reset(db);
1572 
1573 	/* try multiple times, DM9000 sometimes gets the read wrong */
1574 	for (i = 0; i < 8; i++) {
1575 		id_val  = ior(db, DM9000_VIDL);
1576 		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1577 		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1578 		id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1579 
1580 		if (id_val == DM9000_ID)
1581 			break;
1582 		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1583 	}
1584 
1585 	if (id_val != DM9000_ID) {
1586 		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1587 		ret = -ENODEV;
1588 		goto out;
1589 	}
1590 
1591 	/* Identify what type of DM9000 we are working on */
1592 
1593 	id_val = ior(db, DM9000_CHIPR);
1594 	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1595 
1596 	switch (id_val) {
1597 	case CHIPR_DM9000A:
1598 		db->type = TYPE_DM9000A;
1599 		break;
1600 	case CHIPR_DM9000B:
1601 		db->type = TYPE_DM9000B;
1602 		break;
1603 	default:
1604 		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1605 		db->type = TYPE_DM9000E;
1606 	}
1607 
1608 	/* dm9000a/b are capable of hardware checksum offload */
1609 	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1610 		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1611 		ndev->features |= ndev->hw_features;
1612 	}
1613 
1614 	/* from this point we assume that we have found a DM9000 */
1615 
1616 	/* driver system function */
1617 	ether_setup(ndev);
1618 
1619 	ndev->netdev_ops	= &dm9000_netdev_ops;
1620 	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
1621 	ndev->ethtool_ops	= &dm9000_ethtool_ops;
1622 
1623 	db->msg_enable       = NETIF_MSG_LINK;
1624 	db->mii.phy_id_mask  = 0x1f;
1625 	db->mii.reg_num_mask = 0x1f;
1626 	db->mii.force_media  = 0;
1627 	db->mii.full_duplex  = 0;
1628 	db->mii.dev	     = ndev;
1629 	db->mii.mdio_read    = dm9000_phy_read;
1630 	db->mii.mdio_write   = dm9000_phy_write;
1631 
1632 	mac_src = "eeprom";
1633 
1634 	/* try reading the node address from the attached EEPROM */
1635 	for (i = 0; i < 6; i += 2)
1636 		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1637 
1638 	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1639 		mac_src = "platform data";
1640 		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1641 	}
1642 
1643 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1644 		/* try reading from mac */
1645 
1646 		mac_src = "chip";
1647 		for (i = 0; i < 6; i++)
1648 			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1649 	}
1650 
1651 	if (!is_valid_ether_addr(ndev->dev_addr)) {
1652 		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1653 			 "set using ifconfig\n", ndev->name);
1654 
1655 		eth_hw_addr_random(ndev);
1656 		mac_src = "random";
1657 	}
1658 
1659 
1660 	platform_set_drvdata(pdev, ndev);
1661 	ret = register_netdev(ndev);
1662 
1663 	if (ret == 0)
1664 		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1665 		       ndev->name, dm9000_type_to_char(db->type),
1666 		       db->io_addr, db->io_data, ndev->irq,
1667 		       ndev->dev_addr, mac_src);
1668 	return 0;
1669 
1670 out:
1671 	dev_err(db->dev, "not found (%d).\n", ret);
1672 
1673 	dm9000_release_board(pdev, db);
1674 	free_netdev(ndev);
1675 
1676 	return ret;
1677 }
1678 
1679 static int
1680 dm9000_drv_suspend(struct device *dev)
1681 {
1682 	struct platform_device *pdev = to_platform_device(dev);
1683 	struct net_device *ndev = platform_get_drvdata(pdev);
1684 	struct board_info *db;
1685 
1686 	if (ndev) {
1687 		db = netdev_priv(ndev);
1688 		db->in_suspend = 1;
1689 
1690 		if (!netif_running(ndev))
1691 			return 0;
1692 
1693 		netif_device_detach(ndev);
1694 
1695 		/* only shutdown if not using WoL */
1696 		if (!db->wake_state)
1697 			dm9000_shutdown(ndev);
1698 	}
1699 	return 0;
1700 }
1701 
1702 static int
1703 dm9000_drv_resume(struct device *dev)
1704 {
1705 	struct platform_device *pdev = to_platform_device(dev);
1706 	struct net_device *ndev = platform_get_drvdata(pdev);
1707 	struct board_info *db = netdev_priv(ndev);
1708 
1709 	if (ndev) {
1710 		if (netif_running(ndev)) {
1711 			/* reset if we were not in wake mode to ensure if
1712 			 * the device was powered off it is in a known state */
1713 			if (!db->wake_state) {
1714 				dm9000_init_dm9000(ndev);
1715 				dm9000_unmask_interrupts(db);
1716 			}
1717 
1718 			netif_device_attach(ndev);
1719 		}
1720 
1721 		db->in_suspend = 0;
1722 	}
1723 	return 0;
1724 }
1725 
1726 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1727 	.suspend	= dm9000_drv_suspend,
1728 	.resume		= dm9000_drv_resume,
1729 };
1730 
1731 static int
1732 dm9000_drv_remove(struct platform_device *pdev)
1733 {
1734 	struct net_device *ndev = platform_get_drvdata(pdev);
1735 
1736 	unregister_netdev(ndev);
1737 	dm9000_release_board(pdev, netdev_priv(ndev));
1738 	free_netdev(ndev);		/* free device structure */
1739 
1740 	dev_dbg(&pdev->dev, "released and freed device\n");
1741 	return 0;
1742 }
1743 
1744 #ifdef CONFIG_OF
1745 static const struct of_device_id dm9000_of_matches[] = {
1746 	{ .compatible = "davicom,dm9000", },
1747 	{ /* sentinel */ }
1748 };
1749 MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1750 #endif
1751 
1752 static struct platform_driver dm9000_driver = {
1753 	.driver	= {
1754 		.name    = "dm9000",
1755 		.owner	 = THIS_MODULE,
1756 		.pm	 = &dm9000_drv_pm_ops,
1757 		.of_match_table = of_match_ptr(dm9000_of_matches),
1758 	},
1759 	.probe   = dm9000_probe,
1760 	.remove  = dm9000_drv_remove,
1761 };
1762 
1763 module_platform_driver(dm9000_driver);
1764 
1765 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1766 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1767 MODULE_LICENSE("GPL");
1768 MODULE_ALIAS("platform:dm9000");
1769