1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Davicom DM9000 Fast Ethernet driver for Linux.
4 * Copyright (C) 1997 Sten Wang
5 *
6 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
7 *
8 * Additional updates, Copyright:
9 * Ben Dooks <ben@simtec.co.uk>
10 * Sascha Hauer <s.hauer@pengutronix.de>
11 */
12
13 #include <linux/module.h>
14 #include <linux/ioport.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/interrupt.h>
18 #include <linux/skbuff.h>
19 #include <linux/spinlock.h>
20 #include <linux/crc32.h>
21 #include <linux/mii.h>
22 #include <linux/of.h>
23 #include <linux/of_net.h>
24 #include <linux/ethtool.h>
25 #include <linux/dm9000.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/gpio/consumer.h>
32
33 #include <asm/delay.h>
34 #include <asm/irq.h>
35 #include <asm/io.h>
36
37 #include "dm9000.h"
38
39 /* Board/System/Debug information/definition ---------------- */
40
41 #define DM9000_PHY 0x40 /* PHY address 0x01 */
42
43 #define CARDNAME "dm9000"
44
45 /*
46 * Transmit timeout, default 5 seconds.
47 */
48 static int watchdog = 5000;
49 module_param(watchdog, int, 0400);
50 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
51
52 /*
53 * Debug messages level
54 */
55 static int debug;
56 module_param(debug, int, 0644);
57 MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");
58
59 /* DM9000 register address locking.
60 *
61 * The DM9000 uses an address register to control where data written
62 * to the data register goes. This means that the address register
63 * must be preserved over interrupts or similar calls.
64 *
65 * During interrupt and other critical calls, a spinlock is used to
66 * protect the system, but the calls themselves save the address
67 * in the address register in case they are interrupting another
68 * access to the device.
69 *
70 * For general accesses a lock is provided so that calls which are
71 * allowed to sleep are serialised so that the address register does
72 * not need to be saved. This lock also serves to serialise access
73 * to the EEPROM and PHY access registers which are shared between
74 * these two devices.
75 */
76
77 /* The driver supports the original DM9000E, and now the two newer
78 * devices, DM9000A and DM9000B.
79 */
80
81 enum dm9000_type {
82 TYPE_DM9000E, /* original DM9000 */
83 TYPE_DM9000A,
84 TYPE_DM9000B
85 };
86
87 /* Structure/enum declaration ------------------------------- */
88 struct board_info {
89
90 void __iomem *io_addr; /* Register I/O base address */
91 void __iomem *io_data; /* Data I/O address */
92 u16 irq; /* IRQ */
93
94 u16 tx_pkt_cnt;
95 u16 queue_pkt_len;
96 u16 queue_start_addr;
97 u16 queue_ip_summed;
98 u16 dbug_cnt;
99 u8 io_mode; /* 0:word, 2:byte */
100 u8 phy_addr;
101 u8 imr_all;
102
103 unsigned int flags;
104 unsigned int in_timeout:1;
105 unsigned int in_suspend:1;
106 unsigned int wake_supported:1;
107
108 enum dm9000_type type;
109
110 void (*inblk)(void __iomem *port, void *data, int length);
111 void (*outblk)(void __iomem *port, void *data, int length);
112 void (*dumpblk)(void __iomem *port, int length);
113
114 struct device *dev; /* parent device */
115
116 struct resource *addr_res; /* resources found */
117 struct resource *data_res;
118 struct resource *addr_req; /* resources requested */
119 struct resource *data_req;
120
121 int irq_wake;
122
123 struct mutex addr_lock; /* phy and eeprom access lock */
124
125 struct delayed_work phy_poll;
126 struct net_device *ndev;
127
128 spinlock_t lock;
129
130 struct mii_if_info mii;
131 u32 msg_enable;
132 u32 wake_state;
133
134 int ip_summed;
135
136 struct regulator *power_supply;
137 };
138
139 /* debug code */
140
141 #define dm9000_dbg(db, lev, msg...) do { \
142 if ((lev) < debug) { \
143 dev_dbg(db->dev, msg); \
144 } \
145 } while (0)
146
static inline struct board_info *to_dm9000_board(struct net_device *dev)
148 {
149 return netdev_priv(dev);
150 }
151
152 /* DM9000 network board routine ---------------------------- */
153
154 /*
155 * Read a byte from I/O port
156 */
157 static u8
ior(struct board_info *db, int reg)
159 {
160 writeb(reg, db->io_addr);
161 return readb(db->io_data);
162 }
163
164 /*
165 * Write a byte to I/O port
166 */
167
168 static void
iow(struct board_info *db, int reg, int value)
170 {
171 writeb(reg, db->io_addr);
172 writeb(value, db->io_data);
173 }
174
175 static void
dm9000_reset(struct board_info *db)
177 {
178 dev_dbg(db->dev, "resetting device\n");
179
180 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
181 * The essential point is that we have to do a double reset, and the
182 * instruction is to set LBK into MAC internal loopback mode.
183 */
184 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
185 udelay(100); /* Application note says at least 20 us */
186 if (ior(db, DM9000_NCR) & 1)
187 dev_err(db->dev, "dm9000 did not respond to first reset\n");
188
189 iow(db, DM9000_NCR, 0);
190 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
191 udelay(100);
192 if (ior(db, DM9000_NCR) & 1)
193 dev_err(db->dev, "dm9000 did not respond to second reset\n");
194 }
195
196 /* routines for sending block to chip */
197
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
199 {
200 iowrite8_rep(reg, data, count);
201 }
202
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
204 {
205 iowrite16_rep(reg, data, (count+1) >> 1);
206 }
207
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
209 {
210 iowrite32_rep(reg, data, (count+3) >> 2);
211 }
212
213 /* input block from chip to memory */
214
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
216 {
217 ioread8_rep(reg, data, count);
218 }
219
220
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
222 {
223 ioread16_rep(reg, data, (count+1) >> 1);
224 }
225
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
227 {
228 ioread32_rep(reg, data, (count+3) >> 2);
229 }
230
231 /* dump block from chip to null */
232
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
234 {
235 int i;
236
237 for (i = 0; i < count; i++)
238 readb(reg);
239 }
240
static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
242 {
243 int i;
244
245 count = (count + 1) >> 1;
246
247 for (i = 0; i < count; i++)
248 readw(reg);
249 }
250
static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
252 {
253 int i;
254
255 count = (count + 3) >> 2;
256
257 for (i = 0; i < count; i++)
258 readl(reg);
259 }
260
261 /*
* Sleep using msleep(), or busy-wait with mdelay() when we are
* suspending or handling a tx timeout and therefore cannot sleep.
264 */
static void dm9000_msleep(struct board_info *db, unsigned int ms)
266 {
267 if (db->in_suspend || db->in_timeout)
268 mdelay(ms);
269 else
270 msleep(ms);
271 }
272
273 /* Read a word from phyxcer */
274 static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
276 {
277 struct board_info *db = netdev_priv(dev);
278 unsigned long flags;
279 unsigned int reg_save;
280 int ret;
281
282 mutex_lock(&db->addr_lock);
283
284 spin_lock_irqsave(&db->lock, flags);
285
286 /* Save previous register address */
287 reg_save = readb(db->io_addr);
288
289 /* Fill the phyxcer register into REG_0C */
290 iow(db, DM9000_EPAR, DM9000_PHY | reg);
291
292 /* Issue phyxcer read command */
293 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
294
295 writeb(reg_save, db->io_addr);
296 spin_unlock_irqrestore(&db->lock, flags);
297
298 dm9000_msleep(db, 1); /* Wait read complete */
299
300 spin_lock_irqsave(&db->lock, flags);
301 reg_save = readb(db->io_addr);
302
303 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
304
/* The read data is kept in REG_0D & REG_0E */
306 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
307
308 /* restore the previous address */
309 writeb(reg_save, db->io_addr);
310 spin_unlock_irqrestore(&db->lock, flags);
311
312 mutex_unlock(&db->addr_lock);
313
314 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
315 return ret;
316 }
317
318 /* Write a word to phyxcer */
319 static void
dm9000_phy_write(struct net_device *dev,
	int phyaddr_unused, int reg, int value)
322 {
323 struct board_info *db = netdev_priv(dev);
324 unsigned long flags;
325 unsigned long reg_save;
326
327 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
328 if (!db->in_timeout)
329 mutex_lock(&db->addr_lock);
330
331 spin_lock_irqsave(&db->lock, flags);
332
333 /* Save previous register address */
334 reg_save = readb(db->io_addr);
335
336 /* Fill the phyxcer register into REG_0C */
337 iow(db, DM9000_EPAR, DM9000_PHY | reg);
338
339 /* Fill the written data into REG_0D & REG_0E */
340 iow(db, DM9000_EPDRL, value);
341 iow(db, DM9000_EPDRH, value >> 8);
342
343 /* Issue phyxcer write command */
344 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
345
346 writeb(reg_save, db->io_addr);
347 spin_unlock_irqrestore(&db->lock, flags);
348
349 dm9000_msleep(db, 1); /* Wait write complete */
350
351 spin_lock_irqsave(&db->lock, flags);
352 reg_save = readb(db->io_addr);
353
354 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
355
356 /* restore the previous address */
357 writeb(reg_save, db->io_addr);
358
359 spin_unlock_irqrestore(&db->lock, flags);
360 if (!db->in_timeout)
361 mutex_unlock(&db->addr_lock);
362 }
363
364 /* dm9000_set_io
365 *
366 * select the specified set of io routines to use with the
367 * device
368 */
369
static void dm9000_set_io(struct board_info *db, int byte_width)
371 {
372 /* use the size of the data resource to work out what IO
373 * routines we want to use
374 */
375
376 switch (byte_width) {
377 case 1:
378 db->dumpblk = dm9000_dumpblk_8bit;
379 db->outblk = dm9000_outblk_8bit;
380 db->inblk = dm9000_inblk_8bit;
381 break;
382
383
384 case 3:
385 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
386 fallthrough;
387 case 2:
388 db->dumpblk = dm9000_dumpblk_16bit;
389 db->outblk = dm9000_outblk_16bit;
390 db->inblk = dm9000_inblk_16bit;
391 break;
392
393 case 4:
394 default:
395 db->dumpblk = dm9000_dumpblk_32bit;
396 db->outblk = dm9000_outblk_32bit;
397 db->inblk = dm9000_inblk_32bit;
398 break;
399 }
400 }
401
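/* Schedule the periodic PHY poll; only the original DM9000E needs this,
 * since the newer parts raise a link-change interrupt instead. */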
static void dm9000_schedule_poll(struct board_info *db)
403 {
404 if (db->type == TYPE_DM9000E)
405 schedule_delayed_work(&db->phy_poll, HZ * 2);
406 }
407
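/* Pass MII ioctls (SIOCGMIIPHY and friends) through to the generic MII layer. */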
static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
409 {
410 struct board_info *dm = to_dm9000_board(dev);
411
412 if (!netif_running(dev))
413 return -EINVAL;
414
415 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
416 }
417
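/* Read a single register with the IRQ-safe spinlock held, so the address
 * register cannot be clobbered by a concurrent access. */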
418 static unsigned int
dm9000_read_locked(struct board_info *db, int reg)
420 {
421 unsigned long flags;
422 unsigned int ret;
423
424 spin_lock_irqsave(&db->lock, flags);
425 ret = ior(db, reg);
426 spin_unlock_irqrestore(&db->lock, flags);
427
428 return ret;
429 }
430
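/* Wait for a pending EEPROM operation to finish (see the note below on why
 * the ERRE bit alone is not trusted). */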
static int dm9000_wait_eeprom(struct board_info *db)
432 {
433 unsigned int status;
434 int timeout = 8; /* wait max 8msec */
435
436 /* The DM9000 data sheets say we should be able to
437 * poll the ERRE bit in EPCR to wait for the EEPROM
438 * operation. From testing several chips, this bit
439 * does not seem to work.
440 *
441 * We attempt to use the bit, but fall back to the
442 * timeout (which is why we do not return an error
443 * on expiry) to say that the EEPROM operation has
444 * completed.
445 */
446
447 while (1) {
448 status = dm9000_read_locked(db, DM9000_EPCR);
449
450 if ((status & EPCR_ERRE) == 0)
451 break;
452
453 msleep(1);
454
455 if (timeout-- < 0) {
456 dev_dbg(db->dev, "timeout waiting EEPROM\n");
457 break;
458 }
459 }
460
461 return 0;
462 }
463
464 /*
465 * Read a word data from EEPROM
466 */
467 static void
dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
469 {
470 unsigned long flags;
471
472 if (db->flags & DM9000_PLATF_NO_EEPROM) {
473 to[0] = 0xff;
474 to[1] = 0xff;
475 return;
476 }
477
478 mutex_lock(&db->addr_lock);
479
480 spin_lock_irqsave(&db->lock, flags);
481
482 iow(db, DM9000_EPAR, offset);
483 iow(db, DM9000_EPCR, EPCR_ERPRR);
484
485 spin_unlock_irqrestore(&db->lock, flags);
486
487 dm9000_wait_eeprom(db);
488
/* delay for at least 150 us */
490 msleep(1);
491
492 spin_lock_irqsave(&db->lock, flags);
493
494 iow(db, DM9000_EPCR, 0x0);
495
496 to[0] = ior(db, DM9000_EPDRL);
497 to[1] = ior(db, DM9000_EPDRH);
498
499 spin_unlock_irqrestore(&db->lock, flags);
500
501 mutex_unlock(&db->addr_lock);
502 }
503
504 /*
505 * Write a word data to SROM
506 */
507 static void
dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
509 {
510 unsigned long flags;
511
512 if (db->flags & DM9000_PLATF_NO_EEPROM)
513 return;
514
515 mutex_lock(&db->addr_lock);
516
517 spin_lock_irqsave(&db->lock, flags);
518 iow(db, DM9000_EPAR, offset);
519 iow(db, DM9000_EPDRH, data[1]);
520 iow(db, DM9000_EPDRL, data[0]);
521 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
522 spin_unlock_irqrestore(&db->lock, flags);
523
524 dm9000_wait_eeprom(db);
525
mdelay(1); /* wait at least 150 us to clear */
527
528 spin_lock_irqsave(&db->lock, flags);
529 iow(db, DM9000_EPCR, 0);
530 spin_unlock_irqrestore(&db->lock, flags);
531
532 mutex_unlock(&db->addr_lock);
533 }
534
535 /* ethtool ops */
536
static void dm9000_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
539 {
540 struct board_info *dm = to_dm9000_board(dev);
541
542 strscpy(info->driver, CARDNAME, sizeof(info->driver));
543 strscpy(info->bus_info, to_platform_device(dm->dev)->name,
544 sizeof(info->bus_info));
545 }
546
static u32 dm9000_get_msglevel(struct net_device *dev)
548 {
549 struct board_info *dm = to_dm9000_board(dev);
550
551 return dm->msg_enable;
552 }
553
static void dm9000_set_msglevel(struct net_device *dev, u32 value)
555 {
556 struct board_info *dm = to_dm9000_board(dev);
557
558 dm->msg_enable = value;
559 }
560
static int dm9000_get_link_ksettings(struct net_device *dev,
	struct ethtool_link_ksettings *cmd)
563 {
564 struct board_info *dm = to_dm9000_board(dev);
565
566 mii_ethtool_get_link_ksettings(&dm->mii, cmd);
567 return 0;
568 }
569
static int dm9000_set_link_ksettings(struct net_device *dev,
	const struct ethtool_link_ksettings *cmd)
572 {
573 struct board_info *dm = to_dm9000_board(dev);
574
575 return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
576 }
577
static int dm9000_nway_reset(struct net_device *dev)
579 {
580 struct board_info *dm = to_dm9000_board(dev);
581 return mii_nway_restart(&dm->mii);
582 }
583
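/* Toggle hardware RX checksum offload when NETIF_F_RXCSUM is changed. */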
static int dm9000_set_features(struct net_device *dev,
	netdev_features_t features)
586 {
587 struct board_info *dm = to_dm9000_board(dev);
588 netdev_features_t changed = dev->features ^ features;
589 unsigned long flags;
590
591 if (!(changed & NETIF_F_RXCSUM))
592 return 0;
593
594 spin_lock_irqsave(&dm->lock, flags);
595 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
596 spin_unlock_irqrestore(&dm->lock, flags);
597
598 return 0;
599 }
600
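/* Report link state: ask the MII layer for an external PHY, otherwise read
 * the NSR link-status bit directly. */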
static u32 dm9000_get_link(struct net_device *dev)
602 {
603 struct board_info *dm = to_dm9000_board(dev);
604 u32 ret;
605
606 if (dm->flags & DM9000_PLATF_EXT_PHY)
607 ret = mii_link_ok(&dm->mii);
608 else
609 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
610
611 return ret;
612 }
613
614 #define DM_EEPROM_MAGIC (0x444D394B)
615
static int dm9000_get_eeprom_len(struct net_device *dev)
617 {
618 return 128;
619 }
620
static int dm9000_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *ee, u8 *data)
623 {
624 struct board_info *dm = to_dm9000_board(dev);
625 int offset = ee->offset;
626 int len = ee->len;
627 int i;
628
629 /* EEPROM access is aligned to two bytes */
630
631 if ((len & 1) != 0 || (offset & 1) != 0)
632 return -EINVAL;
633
634 if (dm->flags & DM9000_PLATF_NO_EEPROM)
635 return -ENOENT;
636
637 ee->magic = DM_EEPROM_MAGIC;
638
639 for (i = 0; i < len; i += 2)
640 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
641
642 return 0;
643 }
644
static int dm9000_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *ee, u8 *data)
647 {
648 struct board_info *dm = to_dm9000_board(dev);
649 int offset = ee->offset;
650 int len = ee->len;
651 int done;
652
653 /* EEPROM access is aligned to two bytes */
654
655 if (dm->flags & DM9000_PLATF_NO_EEPROM)
656 return -ENOENT;
657
658 if (ee->magic != DM_EEPROM_MAGIC)
659 return -EINVAL;
660
661 while (len > 0) {
662 if (len & 1 || offset & 1) {
663 int which = offset & 1;
664 u8 tmp[2];
665
666 dm9000_read_eeprom(dm, offset / 2, tmp);
667 tmp[which] = *data;
668 dm9000_write_eeprom(dm, offset / 2, tmp);
669
670 done = 1;
671 } else {
672 dm9000_write_eeprom(dm, offset / 2, data);
673 done = 2;
674 }
675
676 data += done;
677 offset += done;
678 len -= done;
679 }
680
681 return 0;
682 }
683
static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
685 {
686 struct board_info *dm = to_dm9000_board(dev);
687
688 memset(w, 0, sizeof(struct ethtool_wolinfo));
689
690 /* note, we could probably support wake-phy too */
691 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
692 w->wolopts = dm->wake_state;
693 }
694
static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
696 {
697 struct board_info *dm = to_dm9000_board(dev);
698 unsigned long flags;
699 u32 opts = w->wolopts;
700 u32 wcr = 0;
701
702 if (!dm->wake_supported)
703 return -EOPNOTSUPP;
704
705 if (opts & ~WAKE_MAGIC)
706 return -EINVAL;
707
708 if (opts & WAKE_MAGIC)
709 wcr |= WCR_MAGICEN;
710
711 mutex_lock(&dm->addr_lock);
712
713 spin_lock_irqsave(&dm->lock, flags);
714 iow(dm, DM9000_WCR, wcr);
715 spin_unlock_irqrestore(&dm->lock, flags);
716
717 mutex_unlock(&dm->addr_lock);
718
719 if (dm->wake_state != opts) {
720 /* change in wol state, update IRQ state */
721
722 if (!dm->wake_state)
723 irq_set_irq_wake(dm->irq_wake, 1);
724 else if (dm->wake_state && !opts)
725 irq_set_irq_wake(dm->irq_wake, 0);
726 }
727
728 dm->wake_state = opts;
729 return 0;
730 }
731
732 static const struct ethtool_ops dm9000_ethtool_ops = {
733 .get_drvinfo = dm9000_get_drvinfo,
734 .get_msglevel = dm9000_get_msglevel,
735 .set_msglevel = dm9000_set_msglevel,
736 .nway_reset = dm9000_nway_reset,
737 .get_link = dm9000_get_link,
738 .get_wol = dm9000_get_wol,
739 .set_wol = dm9000_set_wol,
740 .get_eeprom_len = dm9000_get_eeprom_len,
741 .get_eeprom = dm9000_get_eeprom,
742 .set_eeprom = dm9000_set_eeprom,
743 .get_link_ksettings = dm9000_get_link_ksettings,
744 .set_link_ksettings = dm9000_set_link_ksettings,
745 };
746
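/* Log a link up/down transition, including speed, duplex and the link
 * partner ability read from the PHY. */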
static void dm9000_show_carrier(struct board_info *db,
	unsigned carrier, unsigned nsr)
749 {
750 int lpa;
751 struct net_device *ndev = db->ndev;
752 struct mii_if_info *mii = &db->mii;
753 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
754
755 if (carrier) {
756 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
757 dev_info(db->dev,
758 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
759 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
760 (ncr & NCR_FDX) ? "full" : "half", lpa);
761 } else {
762 dev_info(db->dev, "%s: link down\n", ndev->name);
763 }
764 }
765
766 static void
dm9000_poll_work(struct work_struct *w)
768 {
769 struct delayed_work *dw = to_delayed_work(w);
770 struct board_info *db = container_of(dw, struct board_info, phy_poll);
771 struct net_device *ndev = db->ndev;
772
773 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
774 !(db->flags & DM9000_PLATF_EXT_PHY)) {
775 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
776 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
777 unsigned new_carrier;
778
779 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
780
781 if (old_carrier != new_carrier) {
782 if (netif_msg_link(db))
783 dm9000_show_carrier(db, new_carrier, nsr);
784
785 if (!new_carrier)
786 netif_carrier_off(ndev);
787 else
788 netif_carrier_on(ndev);
789 }
790 } else
791 mii_check_media(&db->mii, netif_msg_link(db), 0);
792
793 if (netif_running(ndev))
794 dm9000_schedule_poll(db);
795 }
796
797 /* dm9000_release_board
798 *
799 * release a board, and any mapped resources
800 */
801
802 static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
804 {
805 /* unmap our resources */
806
807 iounmap(db->io_addr);
808 iounmap(db->io_data);
809
810 /* release the resources */
811
812 if (db->data_req)
813 release_resource(db->data_req);
814 kfree(db->data_req);
815
816 if (db->addr_req)
817 release_resource(db->addr_req);
818 kfree(db->addr_req);
819 }
820
static unsigned char dm9000_type_to_char(enum dm9000_type type)
822 {
823 switch (type) {
824 case TYPE_DM9000E: return 'e';
825 case TYPE_DM9000A: return 'a';
826 case TYPE_DM9000B: return 'b';
827 }
828
829 return '?';
830 }
831
832 /*
833 * Set DM9000 multicast address
834 */
835 static void
dm9000_hash_table_unlocked(struct net_device *dev)
837 {
838 struct board_info *db = netdev_priv(dev);
839 struct netdev_hw_addr *ha;
840 int i, oft;
841 u32 hash_val;
842 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
843 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
844
845 dm9000_dbg(db, 1, "entering %s\n", __func__);
846
847 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
848 iow(db, oft, dev->dev_addr[i]);
849
850 if (dev->flags & IFF_PROMISC)
851 rcr |= RCR_PRMSC;
852
853 if (dev->flags & IFF_ALLMULTI)
854 rcr |= RCR_ALL;
855
/* Set the multicast addresses in the 64-bit hash table */
857 netdev_for_each_mc_addr(ha, dev) {
858 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
859 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
860 }
861
862 /* Write the hash table to MAC MD table */
863 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
864 iow(db, oft++, hash_table[i]);
865 iow(db, oft++, hash_table[i] >> 8);
866 }
867
868 iow(db, DM9000_RCR, rcr);
869 }
870
871 static void
dm9000_hash_table(struct net_device *dev)
873 {
874 struct board_info *db = netdev_priv(dev);
875 unsigned long flags;
876
877 spin_lock_irqsave(&db->lock, flags);
878 dm9000_hash_table_unlocked(dev);
879 spin_unlock_irqrestore(&db->lock, flags);
880 }
881
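/* Mask all interrupt sources, keeping only the RX pointer auto-return bit
 * set in IMR; dm9000_unmask_interrupts() restores the full mask. */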
882 static void
dm9000_mask_interrupts(struct board_info *db)
884 {
885 iow(db, DM9000_IMR, IMR_PAR);
886 }
887
888 static void
dm9000_unmask_interrupts(struct board_info *db)
890 {
891 iow(db, DM9000_IMR, db->imr_all);
892 }
893
894 /*
895 * Initialize dm9000 board
896 */
897 static void
dm9000_init_dm9000(struct net_device *dev)
899 {
900 struct board_info *db = netdev_priv(dev);
901 unsigned int imr;
902 unsigned int ncr;
903
904 dm9000_dbg(db, 1, "entering %s\n", __func__);
905
906 dm9000_reset(db);
907 dm9000_mask_interrupts(db);
908
909 /* I/O mode */
910 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
911
912 /* Checksum mode */
913 if (dev->hw_features & NETIF_F_RXCSUM)
914 iow(db, DM9000_RCSR,
915 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
916
917 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
918 iow(db, DM9000_GPR, 0);
919
920 /* If we are dealing with DM9000B, some extra steps are required: a
921 * manual phy reset, and setting init params.
922 */
923 if (db->type == TYPE_DM9000B) {
924 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
925 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
926 }
927
928 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
929
930 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
931 * up dumping the wake events if we disable this. There is already
932 * a wake-mask in DM9000_WCR */
933 if (db->wake_supported)
934 ncr |= NCR_WAKEEN;
935
936 iow(db, DM9000_NCR, ncr);
937
938 /* Program operating register */
939 iow(db, DM9000_TCR, 0); /* TX Polling clear */
940 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
941 iow(db, DM9000_FCR, 0xff); /* Flow Control */
942 iow(db, DM9000_SMCR, 0); /* Special Mode */
943 /* clear TX status */
944 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
945 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
946
947 /* Set address filter table */
948 dm9000_hash_table_unlocked(dev);
949
950 imr = IMR_PAR | IMR_PTM | IMR_PRM;
951 if (db->type != TYPE_DM9000E)
952 imr |= IMR_LNKCHNG;
953
954 db->imr_all = imr;
955
956 /* Init Driver variable */
957 db->tx_pkt_cnt = 0;
958 db->queue_pkt_len = 0;
959 netif_trans_update(dev);
960 }
961
962 /* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
964 {
965 struct board_info *db = netdev_priv(dev);
966 u8 reg_save;
967 unsigned long flags;
968
969 /* Save previous register address */
970 spin_lock_irqsave(&db->lock, flags);
971 db->in_timeout = 1;
972 reg_save = readb(db->io_addr);
973
974 netif_stop_queue(dev);
975 dm9000_init_dm9000(dev);
976 dm9000_unmask_interrupts(db);
977 /* We can accept TX packets again */
978 netif_trans_update(dev); /* prevent tx timeout */
979 netif_wake_queue(dev);
980
981 /* Restore previous register address */
982 writeb(reg_save, db->io_addr);
983 db->in_timeout = 0;
984 spin_unlock_irqrestore(&db->lock, flags);
985 }
986
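/* Program the TX checksum mode and length for a frame already copied to TX
 * SRAM, then trigger transmission. */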
static void dm9000_send_packet(struct net_device *dev,
	int ip_summed,
	u16 pkt_len)
990 {
991 struct board_info *dm = to_dm9000_board(dev);
992
993 /* The DM9000 is not smart enough to leave fragmented packets alone. */
994 if (dm->ip_summed != ip_summed) {
995 if (ip_summed == CHECKSUM_NONE)
996 iow(dm, DM9000_TCCR, 0);
997 else
998 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
999 dm->ip_summed = ip_summed;
1000 }
1001
1002 /* Set TX length to DM9000 */
1003 iow(dm, DM9000_TXPLL, pkt_len);
1004 iow(dm, DM9000_TXPLH, pkt_len >> 8);
1005
1006 /* Issue TX polling command */
1007 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
1008 }
1009
1010 /*
1011 * Hardware start transmission.
1012 * Send a packet to media from the upper layer.
1013 */
1014 static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1016 {
1017 unsigned long flags;
1018 struct board_info *db = netdev_priv(dev);
1019
1020 dm9000_dbg(db, 3, "%s:\n", __func__);
1021
1022 if (db->tx_pkt_cnt > 1)
1023 return NETDEV_TX_BUSY;
1024
1025 spin_lock_irqsave(&db->lock, flags);
1026
1027 /* Move data to DM9000 TX RAM */
1028 writeb(DM9000_MWCMD, db->io_addr);
1029
1030 (db->outblk)(db->io_data, skb->data, skb->len);
1031 dev->stats.tx_bytes += skb->len;
1032
1033 db->tx_pkt_cnt++;
/* TX control: send the first packet immediately, queue the second */
1035 if (db->tx_pkt_cnt == 1) {
1036 dm9000_send_packet(dev, skb->ip_summed, skb->len);
1037 } else {
1038 /* Second packet */
1039 db->queue_pkt_len = skb->len;
1040 db->queue_ip_summed = skb->ip_summed;
1041 netif_stop_queue(dev);
1042 }
1043
1044 spin_unlock_irqrestore(&db->lock, flags);
1045
1046 /* free this SKB */
1047 dev_consume_skb_any(skb);
1048
1049 return NETDEV_TX_OK;
1050 }
1051
1052 /*
1053 * DM9000 interrupt handler
1054 * receive the packet to upper layer, free the transmitted packet
1055 */
1056
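/* TX-complete handling: account the finished packet and start the queued
 * one, if any. */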
static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
1058 {
1059 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
1060
1061 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1062 /* One packet sent complete */
1063 db->tx_pkt_cnt--;
1064 dev->stats.tx_packets++;
1065
1066 if (netif_msg_tx_done(db))
1067 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1068
1069 /* Queue packet check & send */
1070 if (db->tx_pkt_cnt > 0)
1071 dm9000_send_packet(dev, db->queue_ip_summed,
1072 db->queue_pkt_len);
1073 netif_wake_queue(dev);
1074 }
1075 }
1076
1077 struct dm9000_rxhdr {
1078 u8 RxPktReady;
1079 u8 RxStatus;
1080 __le16 RxLen;
1081 } __packed;
1082
1083 /*
1084 * Received a packet and pass to upper layer
1085 */
1086 static void
dm9000_rx(struct net_device *dev)
1088 {
1089 struct board_info *db = netdev_priv(dev);
1090 struct dm9000_rxhdr rxhdr;
1091 struct sk_buff *skb;
1092 u8 rxbyte, *rdptr;
1093 bool GoodPacket;
1094 int RxLen;
1095
1096 /* Check packet ready or not */
1097 do {
1098 ior(db, DM9000_MRCMDX); /* Dummy read */
1099
1100 /* Get most updated data */
1101 rxbyte = readb(db->io_data);
1102
1103 /* Status check: this byte must be 0 or 1 */
1104 if (rxbyte & DM9000_PKT_ERR) {
1105 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1106 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1107 return;
1108 }
1109
1110 if (!(rxbyte & DM9000_PKT_RDY))
1111 return;
1112
1113 /* A packet ready now & Get status/length */
1114 GoodPacket = true;
1115 writeb(DM9000_MRCMD, db->io_addr);
1116
1117 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1118
1119 RxLen = le16_to_cpu(rxhdr.RxLen);
1120
1121 if (netif_msg_rx_status(db))
1122 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1123 rxhdr.RxStatus, RxLen);
1124
1125 /* Packet Status check */
1126 if (RxLen < 0x40) {
1127 GoodPacket = false;
1128 if (netif_msg_rx_err(db))
1129 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1130 }
1131
1132 if (RxLen > DM9000_PKT_MAX) {
1133 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1134 }
1135
1136 /* rxhdr.RxStatus is identical to RSR register. */
1137 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1138 RSR_PLE | RSR_RWTO |
1139 RSR_LCS | RSR_RF)) {
1140 GoodPacket = false;
1141 if (rxhdr.RxStatus & RSR_FOE) {
1142 if (netif_msg_rx_err(db))
1143 dev_dbg(db->dev, "fifo error\n");
1144 dev->stats.rx_fifo_errors++;
1145 }
1146 if (rxhdr.RxStatus & RSR_CE) {
1147 if (netif_msg_rx_err(db))
1148 dev_dbg(db->dev, "crc error\n");
1149 dev->stats.rx_crc_errors++;
1150 }
1151 if (rxhdr.RxStatus & RSR_RF) {
1152 if (netif_msg_rx_err(db))
1153 dev_dbg(db->dev, "length error\n");
1154 dev->stats.rx_length_errors++;
1155 }
1156 }
1157
1158 /* Move data from DM9000 */
1159 if (GoodPacket &&
1160 ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
1161 skb_reserve(skb, 2);
1162 rdptr = skb_put(skb, RxLen - 4);
1163
1164 /* Read received packet from RX SRAM */
1165
1166 (db->inblk)(db->io_data, rdptr, RxLen);
1167 dev->stats.rx_bytes += RxLen;
1168
1169 /* Pass to upper layer */
1170 skb->protocol = eth_type_trans(skb, dev);
1171 if (dev->features & NETIF_F_RXCSUM) {
1172 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1173 skb->ip_summed = CHECKSUM_UNNECESSARY;
1174 else
1175 skb_checksum_none_assert(skb);
1176 }
1177 netif_rx(skb);
1178 dev->stats.rx_packets++;
1179
1180 } else {
1181 /* need to dump the packet's data */
1182
1183 (db->dumpblk)(db->io_data, RxLen);
1184 }
1185 } while (rxbyte & DM9000_PKT_RDY);
1186 }
1187
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1189 {
1190 struct net_device *dev = dev_id;
1191 struct board_info *db = netdev_priv(dev);
1192 int int_status;
1193 unsigned long flags;
1194 u8 reg_save;
1195
1196 dm9000_dbg(db, 3, "entering %s\n", __func__);
1197
1198 /* A real interrupt coming */
1199
1200 /* holders of db->lock must always block IRQs */
1201 spin_lock_irqsave(&db->lock, flags);
1202
1203 /* Save previous register address */
1204 reg_save = readb(db->io_addr);
1205
1206 dm9000_mask_interrupts(db);
1207 /* Got DM9000 interrupt status */
1208 int_status = ior(db, DM9000_ISR); /* Got ISR */
1209 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1210
1211 if (netif_msg_intr(db))
1212 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1213
1214 /* Received the coming packet */
1215 if (int_status & ISR_PRS)
1216 dm9000_rx(dev);
1217
1218 /* Transmit Interrupt check */
1219 if (int_status & ISR_PTS)
1220 dm9000_tx_done(dev, db);
1221
1222 if (db->type != TYPE_DM9000E) {
1223 if (int_status & ISR_LNKCHNG) {
1224 /* fire a link-change request */
1225 schedule_delayed_work(&db->phy_poll, 1);
1226 }
1227 }
1228
1229 dm9000_unmask_interrupts(db);
1230 /* Restore previous register address */
1231 writeb(reg_save, db->io_addr);
1232
1233 spin_unlock_irqrestore(&db->lock, flags);
1234
1235 return IRQ_HANDLED;
1236 }
1237
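/* Wake-on-LAN interrupt: report which wake event fired and clear it. */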
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1239 {
1240 struct net_device *dev = dev_id;
1241 struct board_info *db = netdev_priv(dev);
1242 unsigned long flags;
1243 unsigned nsr, wcr;
1244
1245 spin_lock_irqsave(&db->lock, flags);
1246
1247 nsr = ior(db, DM9000_NSR);
1248 wcr = ior(db, DM9000_WCR);
1249
1250 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1251
1252 if (nsr & NSR_WAKEST) {
/* clear the wake status so it is not reported again */
1254 iow(db, DM9000_NSR, NSR_WAKEST);
1255
1256 if (wcr & WCR_LINKST)
1257 dev_info(db->dev, "wake by link status change\n");
1258 if (wcr & WCR_SAMPLEST)
1259 dev_info(db->dev, "wake by sample packet\n");
1260 if (wcr & WCR_MAGICST)
1261 dev_info(db->dev, "wake by magic packet\n");
1262 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1263 dev_err(db->dev, "wake signalled with no reason? "
1264 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1265 }
1266
1267 spin_unlock_irqrestore(&db->lock, flags);
1268
1269 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1270 }
1271
1272 #ifdef CONFIG_NET_POLL_CONTROLLER
1273 /*
* Used by netconsole
1275 */
static void dm9000_poll_controller(struct net_device *dev)
1277 {
1278 disable_irq(dev->irq);
1279 dm9000_interrupt(dev->irq, dev);
1280 enable_irq(dev->irq);
1281 }
1282 #endif
1283
1284 /*
1285 * Open the interface.
* The interface is opened whenever "ifconfig" activates it.
1287 */
1288 static int
dm9000_open(struct net_device *dev)
1290 {
1291 struct board_info *db = netdev_priv(dev);
1292 unsigned int irq_flags = irq_get_trigger_type(dev->irq);
1293
1294 if (netif_msg_ifup(db))
1295 dev_dbg(db->dev, "enabling %s\n", dev->name);
1296
1297 /* If there is no IRQ type specified, tell the user that this is a
1298 * problem
1299 */
1300 if (irq_flags == IRQF_TRIGGER_NONE)
1301 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1302
1303 irq_flags |= IRQF_SHARED;
1304
1305 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1306 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needed by DM9000B */
1308
1309 /* Initialize DM9000 board */
1310 dm9000_init_dm9000(dev);
1311
1312 if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
1313 return -EAGAIN;
1314 /* Now that we have an interrupt handler hooked up we can unmask
1315 * our interrupts
1316 */
1317 dm9000_unmask_interrupts(db);
1318
1319 /* Init driver variable */
1320 db->dbug_cnt = 0;
1321
1322 mii_check_media(&db->mii, netif_msg_link(db), 1);
1323 netif_start_queue(dev);
1324
1325 /* Poll initial link status */
1326 schedule_delayed_work(&db->phy_poll, 1);
1327
1328 return 0;
1329 }
1330
1331 static void
dm9000_shutdown(struct net_device *dev)
1333 {
1334 struct board_info *db = netdev_priv(dev);
1335
1336 /* RESET device */
1337 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1338 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1339 dm9000_mask_interrupts(db);
1340 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1341 }
1342
1343 /*
1344 * Stop the interface.
* The interface is stopped when it is brought down.
1346 */
1347 static int
dm9000_stop(struct net_device *ndev)
1349 {
1350 struct board_info *db = netdev_priv(ndev);
1351
1352 if (netif_msg_ifdown(db))
1353 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1354
1355 cancel_delayed_work_sync(&db->phy_poll);
1356
1357 netif_stop_queue(ndev);
1358 netif_carrier_off(ndev);
1359
1360 /* free interrupt */
1361 free_irq(ndev->irq, ndev);
1362
1363 dm9000_shutdown(ndev);
1364
1365 return 0;
1366 }
1367
1368 static const struct net_device_ops dm9000_netdev_ops = {
1369 .ndo_open = dm9000_open,
1370 .ndo_stop = dm9000_stop,
1371 .ndo_start_xmit = dm9000_start_xmit,
1372 .ndo_tx_timeout = dm9000_timeout,
1373 .ndo_set_rx_mode = dm9000_hash_table,
1374 .ndo_eth_ioctl = dm9000_ioctl,
1375 .ndo_set_features = dm9000_set_features,
1376 .ndo_validate_addr = eth_validate_addr,
1377 .ndo_set_mac_address = eth_mac_addr,
1378 #ifdef CONFIG_NET_POLL_CONTROLLER
1379 .ndo_poll_controller = dm9000_poll_controller,
1380 #endif
1381 };
1382
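/* Build platform data from the device tree. As a rough illustration only
 * (register addresses, sizes and interrupt specifier below are placeholder
 * values, not taken from any particular board), a matching node could look
 * like:
 *
 *	ethernet@18000000 {
 *		compatible = "davicom,dm9000";
 *		reg = <0x18000000 0x2>, <0x18000004 0x2>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <6 4>;
 *		davicom,no-eeprom;
 *	};
 */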
static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1384 {
1385 struct dm9000_plat_data *pdata;
1386 struct device_node *np = dev->of_node;
1387 int ret;
1388
1389 if (!IS_ENABLED(CONFIG_OF) || !np)
1390 return ERR_PTR(-ENXIO);
1391
1392 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1393 if (!pdata)
1394 return ERR_PTR(-ENOMEM);
1395
1396 if (of_property_read_bool(np, "davicom,ext-phy"))
1397 pdata->flags |= DM9000_PLATF_EXT_PHY;
1398 if (of_property_read_bool(np, "davicom,no-eeprom"))
1399 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1400
1401 ret = of_get_mac_address(np, pdata->dev_addr);
1402 if (ret == -EPROBE_DEFER)
1403 return ERR_PTR(ret);
1404
1405 return pdata;
1406 }
1407
1408 /*
1409 * Search DM9000 board, allocate space and register it
1410 */
1411 static int
dm9000_probe(struct platform_device *pdev)
1413 {
1414 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct board_info *db; /* points to board information structure */
1416 struct net_device *ndev;
1417 struct device *dev = &pdev->dev;
1418 const unsigned char *mac_src;
1419 int ret = 0;
1420 int iosize;
1421 int i;
1422 u32 id_val;
1423 struct gpio_desc *reset_gpio;
1424 struct regulator *power;
1425 bool inv_mac_addr = false;
1426 u8 addr[ETH_ALEN];
1427
1428 power = devm_regulator_get(dev, "vcc");
1429 if (IS_ERR(power)) {
1430 if (PTR_ERR(power) == -EPROBE_DEFER)
1431 return -EPROBE_DEFER;
1432 dev_dbg(dev, "no regulator provided\n");
1433 } else {
1434 ret = regulator_enable(power);
1435 if (ret != 0) {
1436 dev_err(dev,
1437 "Failed to enable power regulator: %d\n", ret);
1438 return ret;
1439 }
1440 dev_dbg(dev, "regulator enabled\n");
1441 }
1442
1443 reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
1444 ret = PTR_ERR_OR_ZERO(reset_gpio);
1445 if (ret) {
1446 dev_err(dev, "failed to request reset gpio: %d\n", ret);
1447 goto out_regulator_disable;
1448 }
1449
1450 if (reset_gpio) {
1451 ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
1452 if (ret) {
1453 dev_err(dev, "failed to set reset gpio name: %d\n",
1454 ret);
1455 goto out_regulator_disable;
1456 }
1457
1458 /* According to manual PWRST# Low Period Min 1ms */
1459 msleep(2);
1460 gpiod_set_value_cansleep(reset_gpio, 0);
1461 /* Needs 3ms to read eeprom when PWRST is deasserted */
1462 msleep(4);
1463 }
1464
1465 if (!pdata) {
1466 pdata = dm9000_parse_dt(&pdev->dev);
1467 if (IS_ERR(pdata)) {
1468 ret = PTR_ERR(pdata);
1469 goto out_regulator_disable;
1470 }
1471 }
1472
1473 /* Init network device */
1474 ndev = alloc_etherdev(sizeof(struct board_info));
1475 if (!ndev) {
1476 ret = -ENOMEM;
1477 goto out_regulator_disable;
1478 }
1479
1480 SET_NETDEV_DEV(ndev, &pdev->dev);
1481
1482 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1483
1484 /* setup board info structure */
1485 db = netdev_priv(ndev);
1486
1487 db->dev = &pdev->dev;
1488 db->ndev = ndev;
1489 if (!IS_ERR(power))
1490 db->power_supply = power;
1491
1492 spin_lock_init(&db->lock);
1493 mutex_init(&db->addr_lock);
1494
1495 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1496
1497 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1498 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1499
1500 if (!db->addr_res || !db->data_res) {
1501 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1502 db->addr_res, db->data_res);
1503 ret = -ENOENT;
1504 goto out;
1505 }
1506
1507 ndev->irq = platform_get_irq(pdev, 0);
1508 if (ndev->irq < 0) {
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1513 db->irq_wake = platform_get_irq_optional(pdev, 1);
1514 if (db->irq_wake >= 0) {
1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1516
1517 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1518 IRQF_SHARED, dev_name(db->dev), ndev);
1519 if (ret) {
1520 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1521 } else {
1522
1523 /* test to see if irq is really wakeup capable */
1524 ret = irq_set_irq_wake(db->irq_wake, 1);
1525 if (ret) {
1526 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1527 db->irq_wake, ret);
1528 } else {
1529 irq_set_irq_wake(db->irq_wake, 0);
1530 db->wake_supported = 1;
1531 }
1532 }
1533 }
1534
1535 iosize = resource_size(db->addr_res);
1536 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1537 pdev->name);
1538
1539 if (db->addr_req == NULL) {
1540 dev_err(db->dev, "cannot claim address reg area\n");
1541 ret = -EIO;
1542 goto out;
1543 }
1544
1545 db->io_addr = ioremap(db->addr_res->start, iosize);
1546
1547 if (db->io_addr == NULL) {
1548 dev_err(db->dev, "failed to ioremap address reg\n");
1549 ret = -EINVAL;
1550 goto out;
1551 }
1552
1553 iosize = resource_size(db->data_res);
1554 db->data_req = request_mem_region(db->data_res->start, iosize,
1555 pdev->name);
1556
1557 if (db->data_req == NULL) {
1558 dev_err(db->dev, "cannot claim data reg area\n");
1559 ret = -EIO;
1560 goto out;
1561 }
1562
1563 db->io_data = ioremap(db->data_res->start, iosize);
1564
1565 if (db->io_data == NULL) {
1566 dev_err(db->dev, "failed to ioremap data reg\n");
1567 ret = -EINVAL;
1568 goto out;
1569 }
1570
1571 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr;
1573
1574 /* ensure at least we have a default set of IO routines */
1575 dm9000_set_io(db, iosize);
1576
1577 /* check to see if anything is being over-ridden */
1578 if (pdata != NULL) {
1579 /* check to see if the driver wants to over-ride the
1580 * default IO width */
1581
1582 if (pdata->flags & DM9000_PLATF_8BITONLY)
1583 dm9000_set_io(db, 1);
1584
1585 if (pdata->flags & DM9000_PLATF_16BITONLY)
1586 dm9000_set_io(db, 2);
1587
1588 if (pdata->flags & DM9000_PLATF_32BITONLY)
1589 dm9000_set_io(db, 4);
1590
1591 /* check to see if there are any IO routine
1592 * over-rides */
1593
1594 if (pdata->inblk != NULL)
1595 db->inblk = pdata->inblk;
1596
1597 if (pdata->outblk != NULL)
1598 db->outblk = pdata->outblk;
1599
1600 if (pdata->dumpblk != NULL)
1601 db->dumpblk = pdata->dumpblk;
1602
1603 db->flags = pdata->flags;
1604 }
1605
1606 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1607 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1608 #endif
1609
1610 dm9000_reset(db);
1611
1612 /* try multiple times, DM9000 sometimes gets the read wrong */
1613 for (i = 0; i < 8; i++) {
1614 id_val = ior(db, DM9000_VIDL);
1615 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1616 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1617 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1618
1619 if (id_val == DM9000_ID)
1620 break;
1621 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1622 }
1623
1624 if (id_val != DM9000_ID) {
1625 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1626 ret = -ENODEV;
1627 goto out;
1628 }
1629
1630 /* Identify what type of DM9000 we are working on */
1631
1632 id_val = ior(db, DM9000_CHIPR);
1633 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1634
1635 switch (id_val) {
1636 case CHIPR_DM9000A:
1637 db->type = TYPE_DM9000A;
1638 break;
1639 case CHIPR_DM9000B:
1640 db->type = TYPE_DM9000B;
1641 break;
1642 default:
1643 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1644 db->type = TYPE_DM9000E;
1645 }
1646
1647 /* dm9000a/b are capable of hardware checksum offload */
1648 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1649 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1650 ndev->features |= ndev->hw_features;
1651 }
1652
1653 /* from this point we assume that we have found a DM9000 */
1654
1655 ndev->netdev_ops = &dm9000_netdev_ops;
1656 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1657 ndev->ethtool_ops = &dm9000_ethtool_ops;
1658
1659 db->msg_enable = NETIF_MSG_LINK;
1660 db->mii.phy_id_mask = 0x1f;
1661 db->mii.reg_num_mask = 0x1f;
1662 db->mii.force_media = 0;
1663 db->mii.full_duplex = 0;
1664 db->mii.dev = ndev;
1665 db->mii.mdio_read = dm9000_phy_read;
1666 db->mii.mdio_write = dm9000_phy_write;
1667
1668 mac_src = "eeprom";
1669
1670 /* try reading the node address from the attached EEPROM */
1671 for (i = 0; i < 6; i += 2)
1672 dm9000_read_eeprom(db, i / 2, addr + i);
1673 eth_hw_addr_set(ndev, addr);
1674
1675 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1676 mac_src = "platform data";
1677 eth_hw_addr_set(ndev, pdata->dev_addr);
1678 }
1679
1680 if (!is_valid_ether_addr(ndev->dev_addr)) {
1681 /* try reading from mac */
1682
1683 mac_src = "chip";
1684 for (i = 0; i < 6; i++)
1685 addr[i] = ior(db, i + DM9000_PAR);
eth_hw_addr_set(ndev, addr);
1687 }
1688
1689 if (!is_valid_ether_addr(ndev->dev_addr)) {
1690 inv_mac_addr = true;
1691 eth_hw_addr_random(ndev);
1692 mac_src = "random";
1693 }
1694
1695
1696 platform_set_drvdata(pdev, ndev);
1697 ret = register_netdev(ndev);
1698
1699 if (ret == 0) {
1700 if (inv_mac_addr)
1701 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
1702 ndev->name);
1703 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1704 ndev->name, dm9000_type_to_char(db->type),
1705 db->io_addr, db->io_data, ndev->irq,
1706 ndev->dev_addr, mac_src);
1707 }
1708 return 0;
1709
1710 out:
1711 dev_err(db->dev, "not found (%d).\n", ret);
1712
1713 dm9000_release_board(pdev, db);
1714 free_netdev(ndev);
1715
1716 out_regulator_disable:
1717 if (!IS_ERR(power))
1718 regulator_disable(power);
1719
1720 return ret;
1721 }
1722
1723 static int
dm9000_drv_suspend(struct device *dev)
1725 {
1726 struct net_device *ndev = dev_get_drvdata(dev);
1727 struct board_info *db;
1728
1729 if (ndev) {
1730 db = netdev_priv(ndev);
1731 db->in_suspend = 1;
1732
1733 if (!netif_running(ndev))
1734 return 0;
1735
1736 netif_device_detach(ndev);
1737
1738 /* only shutdown if not using WoL */
1739 if (!db->wake_state)
1740 dm9000_shutdown(ndev);
1741 }
1742 return 0;
1743 }
1744
1745 static int
dm9000_drv_resume(struct device *dev)
1747 {
1748 struct net_device *ndev = dev_get_drvdata(dev);
1749 struct board_info *db = netdev_priv(ndev);
1750
1751 if (ndev) {
1752 if (netif_running(ndev)) {
/* reset if we were not in wake mode, to ensure the device is
* in a known state if it was powered off */
1755 if (!db->wake_state) {
1756 dm9000_init_dm9000(ndev);
1757 dm9000_unmask_interrupts(db);
1758 }
1759
1760 netif_device_attach(ndev);
1761 }
1762
1763 db->in_suspend = 0;
1764 }
1765 return 0;
1766 }
1767
1768 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1769 .suspend = dm9000_drv_suspend,
1770 .resume = dm9000_drv_resume,
1771 };
1772
static void dm9000_drv_remove(struct platform_device *pdev)
1774 {
1775 struct net_device *ndev = platform_get_drvdata(pdev);
1776 struct board_info *dm = to_dm9000_board(ndev);
1777
1778 unregister_netdev(ndev);
1779 dm9000_release_board(pdev, dm);
1780 free_netdev(ndev); /* free device structure */
1781 if (dm->power_supply)
1782 regulator_disable(dm->power_supply);
1783
1784 dev_dbg(&pdev->dev, "released and freed device\n");
1785 }
1786
1787 #ifdef CONFIG_OF
1788 static const struct of_device_id dm9000_of_matches[] = {
1789 { .compatible = "davicom,dm9000", },
1790 { /* sentinel */ }
1791 };
1792 MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1793 #endif
1794
1795 static struct platform_driver dm9000_driver = {
1796 .driver = {
1797 .name = "dm9000",
1798 .pm = &dm9000_drv_pm_ops,
1799 .of_match_table = of_match_ptr(dm9000_of_matches),
1800 },
1801 .probe = dm9000_probe,
1802 .remove_new = dm9000_drv_remove,
1803 };
1804
1805 module_platform_driver(dm9000_driver);
1806
1807 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1808 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1809 MODULE_LICENSE("GPL");
1810 MODULE_ALIAS("platform:dm9000");
1811