/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 * Copyright (C) 1997  Sten Wang
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
 *
 * Additional updates, Copyright:
 *	Ben Dooks <ben@simtec.co.uk>
 *	Sascha Hauer <s.hauer@pengutronix.de>
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/System/Debug information/definition ---------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/*
 * Debug messages level
 */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");

/* DM9000 register address locking.
 *
 * The DM9000 uses an address register to control where data written
 * to the data register goes. This means that the address register
 * must be preserved over interrupts or similar calls.
 *
 * During interrupt and other critical calls, a spinlock is used to
 * protect the system, but the calls themselves save the address
 * in the address register in case they are interrupting another
 * access to the device.
 *
 * For general accesses a lock is provided so that calls which are
 * allowed to sleep are serialised so that the address register does
 * not need to be saved. This lock also serves to serialise access
 * to the EEPROM and PHY access registers which are shared between
 * these two devices.
 */

/* The driver supports the original DM9000E, and now the two newer
 * devices, DM9000A and DM9000B.
 */
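
/* Note on the locking scheme described above: a typical locked register
 * access (see dm9000_interrupt() and dm9000_phy_read() for real
 * instances of this pattern) looks like the following, where SOME_REG
 * stands for whichever register is being accessed:
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);	(save current address register)
 *	iow(db, SOME_REG, value);	(select register, write data)
 *	writeb(reg_save, db->io_addr);	(restore any interrupted access)
 *	spin_unlock_irqrestore(&db->lock, flags);
 */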

enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Structure/enum declaration ------------------------------- */
typedef struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		 irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_suspend :1;
	unsigned int	wake_supported :1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;
	struct resource	*irq_res;

	int		 irq_wake;

	struct mutex	 addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;
} board_info_t;

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routine ---------------------------- */

static void
dm9000_reset(board_info_t * db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* RESET device */
	writeb(DM9000_NCR, db->io_addr);
	udelay(200);
	writeb(NCR_RST, db->io_data);
	udelay(200);
}

/*
 * Read a byte from I/O port
 */
static u8
ior(board_info_t * db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/*
 * Write a byte to I/O port
 */

static void
iow(board_info_t * db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	writesb(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	writesw(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	writesl(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	readsb(reg, data, count);
}


static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	readsw(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	readsl(reg, data, (count+3) >> 2);
}

/* dump block from chip to null */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}

/* dm9000_set_io
 *
 * select the specified set of io routines to use with the
 * device
 */

static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;


	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}
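
/* EEPROM access goes through the shared EEPROM/PHY register window:
 * EPAR selects the word offset, EPCR issues the read/write command and
 * reports busy state via EPCR_ERRE, and EPDRL/EPDRH carry the 16-bit
 * data. The helpers below (dm9000_wait_eeprom, dm9000_read_eeprom and
 * dm9000_write_eeprom) wrap this sequence and take addr_lock so that
 * EEPROM and PHY accesses cannot interleave.
 */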

static int dm9000_wait_eeprom(board_info_t *db)
{
	unsigned int status;
	int timeout = 8;	/* wait max 8msec */

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation. From testing several chips, this bit
	 * does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */

	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting for EEPROM\n");
			break;
		}
	}

	return 0;
}

/*
 * Read a word of data from the EEPROM
 */
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at least 150us */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/*
 * Write a word of data to the EEPROM (SROM)
 */
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);	/* wait at least 150us to clear */

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strcpy(info->driver, CARDNAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, to_platform_device(dm->dev)->name);
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}
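
/* RX checksum offload: dm9000_set_features() below writes RCSR_CSUM
 * whenever the NETIF_F_RXCSUM feature bit changes. Only the DM9000A/B
 * advertise this feature (see the hw_features setup in dm9000_probe),
 * so this path is never taken on a DM9000E.
 */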

static int dm9000_set_features(struct net_device *dev, u32 features)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* note, we could probably support wake-phy too */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */

		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};

static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	struct net_device *ndev = db->ndev;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier)
		dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half");
	else
		dev_info(db->dev, "%s: link down\n", ndev->name);
}
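
/* Link management: the original DM9000E has no link-change interrupt,
 * so dm9000_schedule_poll() re-arms dm9000_poll_work() every two
 * seconds while the interface is running. The DM9000A/B raise
 * ISR_LNKCHNG instead, and the interrupt handler schedules a single
 * poll when that fires. Boards using the internal PHY with
 * DM9000_PLATF_SIMPLE_PHY read the link state directly from
 * NSR_LINKST; everything else goes through the MII library.
 */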

static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

/* dm9000_release_board
 *
 * release a board, and any mapped resources
 */

static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */

	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

/*
 * Set DM9000 multicast address
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4];
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address */
	hash_table[3] = 0x8000;

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* the multicast address in Hash Table: 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

/*
 * Initialize dm9000 board
 */
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */

	/* Checksum mode */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* If WoL is needed, always set NCR_WAKEEN; otherwise we end up
	 * dropping wake events when this is disabled. There is already
	 * a wake-mask in DM9000_WCR */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* Program operating register */
	iow(db, DM9000_TCR, 0);		/* TX Polling clear */
	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
	iow(db, DM9000_SMCR, 0);	/* Special Mode */
	/* clear TX status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);	/* Clear interrupt status */

	/* Set address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* Enable TX/RX interrupt mask */
	iow(db, DM9000_IMR, imr);

	/* Init Driver variable */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	dev->trans_start = jiffies;
}

/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);
	/* We can accept TX packets again */
	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}
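
/* TX path overview: the DM9000 TX SRAM holds up to two outstanding
 * packets. dm9000_start_xmit() copies a packet into the chip and, if it
 * is the only one pending (tx_pkt_cnt == 1), starts transmission at
 * once via dm9000_send_packet(); for a second packet only the length
 * and checksum mode are remembered and the queue is stopped.
 * dm9000_tx_done() later sends the queued packet and wakes the queue
 * when the chip reports NSR_TX1END/NSR_TX2END.
 */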

/*
 * Hardware start transmission.
 * Send a packet to media from the upper layer.
 */
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* Move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* TX control: the first packet is sent immediately, the second is queued */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* Second packet */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * DM9000 interrupt handler:
 * pass received packets to the upper layer, free transmitted packets
 */

static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;
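
/* RX path overview: each packet in the RX SRAM is preceded by a 4-byte
 * header (ready marker, a status byte mirroring the RSR register and a
 * 16-bit length). The length reported by the chip includes the trailing
 * 4-byte CRC, hence the RxLen - 4 passed to skb_put() below, while
 * skb_reserve(skb, 2) keeps the IP header word-aligned behind the
 * 14-byte Ethernet header.
 */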

/*
 * Receive a packet and pass it to the upper layer
 */
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* Check packet ready or not */
	do {
		ior(db, DM9000_MRCMDX);	/* Dummy read */

		/* Get most updated data */
		rxbyte = readb(db->io_data);

		/* Status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
			iow(db, DM9000_ISR, IMR_PAR);	/* Stop INT request */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* A packet is ready now; get status/length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* Packet Status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX) {
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
		}

		/* rxhdr.RxStatus is identical to RSR register. */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket &&
		    ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */

			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Disable all interrupts */
	iow(db, DM9000_IMR, IMR_PAR);

	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Receive any incoming packets */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	/* Re-enable interrupt mask */
	iow(db, DM9000_IMR, db->imr_all);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
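
/* Wake-on-LAN: a board may provide a dedicated wake-up interrupt as
 * platform IRQ resource 1 (see dm9000_probe). dm9000_wol_interrupt()
 * only acknowledges NSR_WAKEST and logs which WCR event (link change,
 * sample packet or magic packet) caused the wake-up; normal traffic is
 * still handled by dm9000_interrupt().
 */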

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear the status bit so we do not keep signalling the event */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);

	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */
static int
dm9000_open(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, default to something that
	 * may work, and tell the user that this is a problem */

	if (irqflags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irqflags |= IRQF_SHARED;

	/* Pre-activate the PHY via GPIO0; REG_1F is not cleared by reset */
	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
	mdelay(1);		/* delay needed by DM9000B */

	/* Initialize DM9000 board */
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	dm9000_schedule_poll(db);

	return 0;
}
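
/* PHY access: the internal PHY is reached through the same
 * EPAR/EPCR/EPDR window as the EEPROM, with EPCR_EPOS selecting the PHY
 * and the fixed address DM9000_PHY (0x40) or-ed into EPAR.
 * dm9000_phy_read() and dm9000_phy_write() below implement this for the
 * MII library, using dm9000_msleep() so they also work while the device
 * is suspending.
 */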

/*
 * Sleep, either by using msleep() or if we are suspending, then
 * use mdelay() to sleep.
 */
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend)
		mdelay(ms);
	else
		msleep(ms);
}

/*
 * Read a word from the phyxcer
 */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);	/* Issue phyxcer read command */

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait for the read to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data is kept in REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

/*
 * Write a word to the phyxcer
 */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);	/* Issue phyxcer write command */

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait for the write to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	mutex_unlock(&db->addr_lock);
}

static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	iow(db, DM9000_IMR, IMR_PAR);	/* Disable all interrupts */
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}

/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};
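
/* Platform binding used by dm9000_probe() below: memory resource 0 is
 * the address (index) port, memory resource 1 is the data port, IRQ
 * resource 0 is the packet interrupt and the optional IRQ resource 1 is
 * the wake-up interrupt. The width of the data resource selects the
 * default I/O routines unless the platform data overrides it.
 */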

/*
 * Search DM9000 board, allocate space and register it
 */
static int __devinit
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
	struct board_info *db;	/* Points to the board information structure */
	struct net_device *ndev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (db->addr_res == NULL || db->data_res == NULL ||
	    db->irq_res == NULL) {
		dev_err(db->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {

			/* test to see if irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;
	ndev->irq	= db->irq_res->start;

	/* ensure at least we have a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check to see if anything is being over-ridden */
	if (pdata != NULL) {
		/* check to see if the driver wants to over-ride the
		 * default IO width */

		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check to see if there are any IO routine
		 * over-rides */

		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	/* try multiple times, DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* Identify what type of DM9000 we are working on */

	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}

	/* from this point we assume that we have found a DM9000 */

	/* driver system function */
	ether_setup(ndev);

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;
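
	/* MAC address selection: try the attached EEPROM first, then
	 * platform data, then whatever is already programmed into the
	 * chip's PAR registers, and finally fall back to a random
	 * address. */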

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */

		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

		random_ether_addr(ndev->dev_addr);
		mac_src = "random";
	}


	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0)
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shutdown if not using WoL */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			/* Reset if we were not in wake mode, to ensure the
			 * device is in a known state even if it was powered
			 * off while suspended. */
			if (!db->wake_state) {
				dm9000_reset(db);
				dm9000_init_dm9000(ndev);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);		/* free device structure */

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.owner	 = THIS_MODULE,
		.pm	 = &dm9000_drv_pm_ops,
	},
	.probe   = dm9000_probe,
	.remove  = __devexit_p(dm9000_drv_remove),
};

static int __init
dm9000_init(void)
{
	printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);

	return platform_driver_register(&dm9000_driver);
}

static void __exit
dm9000_cleanup(void)
{
	platform_driver_unregister(&dm9000_driver);
}

module_init(dm9000_init);
module_exit(dm9000_cleanup);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");