/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *			Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*#define DEBUG*/
/*#define TEST_HITS*/

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/* Transmit/Receive Ring Definitions */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE

/* Layout of the Lance's RAM Buffer */

struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
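	/* The descriptor rings and packet buffers below share the board RAM
	 * with the init block proper; rx_len/tx_len above carry the upper
	 * address bits of each ring together with log2 of its number of
	 * entries in bits 15-13 (the chip's RLEN/TLEN fields), which
	 * lance_init_ring() derives from LANCE_LOG_RX/TX_BUFFERS.
	 */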
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/* Private Device Data */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	     /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* Lance's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;			/* cable-selection is TPE */
	int auto_select;		/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
	struct net_device *dev;
};

#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr = LANCE_ADDR(aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}

/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib = lp->lance_init_block;
					/* for LANCE_ADDR computations */
	int leptr;
	int i;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
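	/* The pairwise swap matches how the LANCE fetches PADR from the init
	 * block: as 16-bit words with the first address byte in the low half
	 * of each word, while the big-endian 68k puts the lower-addressed
	 * byte in the upper half (CSR3 BSWP only swaps buffer data, not the
	 * init block or the descriptors).
	 */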
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];

	/* Setup the Tx ring entries */
	netdev_dbg(dev, "TX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0 = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	netdev_dbg(dev, "RX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0 = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	netdev_dbg(dev, "RX ptr: %08x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	netdev_dbg(dev, "TX ptr: %08x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
}

static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}

static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	char buf[RX_RING_SIZE + 1];

	for (i = 0; i < RX_RING_SIZE; i++) {
		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
		if (i == lp->rx_new)
			buf[i] = r1_own ? '_' : 'X';
		else
			buf[i] = r1_own ? '.' : '1';
	}
	buf[RX_RING_SIZE] = 0;

	pr_debug("RxRing TestHits: [%s]\n", buf);
#endif

	ll->rdp = LE_C0_RINT | LE_C0_INEA;
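	/* Walk the ring until we hit a descriptor the LANCE still owns
	 * (LE_R1_OWN set); each one we own is processed and then handed
	 * back to the chip.
	 */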
	for (rd = &ib->brx_ring[lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, len);	/* make room */
			skb_copy_to_linear_data(skb,
					(unsigned char *)&ib->rx_buf[lp->rx_new][0],
					len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}

static int lance_tx_buffs_avail(struct lance_private *lp)
{
	if (lp->tx_old <= lp->tx_new)
		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
	return lp->tx_old - lp->tx_new - 1;
}

static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}

static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}

static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
	lance_reset(dev);
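	/* lance_reset() rebuilt the rings, so every Tx slot is free again
	 * and the queue can be restarted.
	 */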
	netif_wake_queue(dev);
}

static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!lance_tx_buffs_avail(lp))
		goto out_free;

#ifdef DEBUG
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
out_free:
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}

/* taken from the depca driver */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}

static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}

static void lance_set_multicast_retry(struct timer_list *t)
{
	struct lance_private *lp = from_timer(lp, t, multicast_timer);

	lance_set_multicast(lp->dev);
}

static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);


static const struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
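
/* The Zorro bus core matches expansion boards against the ID table above
 * and calls a2065_init_one() for each A2065 it finds; the Commodore and
 * Ameristar variants are handled identically apart from the first three
 * bytes of the station address.
 */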
static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= a2065_remove_one,
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + A2065_LANCE;
	unsigned long mem_start = board + A2065_RAM;
	struct resource *r1, *r2;
	u32 serial;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	serial = be32_to_cpu(z->rom.er_SerialNumber);
	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (serial >> 16) & 0xff;
	dev->dev_addr[4] = (serial >> 8) & 0xff;
	dev->dev_addr[5] = serial & 0xff;
	dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
	dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
	priv->dev = dev;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}


static void a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");
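
/* Optional module metadata: a one-line description shown by modinfo(8);
 * the wording is taken from the header comment above.
 */
MODULE_DESCRIPTION("Amiga Linux/68k A2065 Ethernet driver");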