1 /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. 2 * 3 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net) 4 */ 5 6 #include <linux/module.h> 7 8 #include <linux/kernel.h> 9 #include <linux/types.h> 10 #include <linux/fcntl.h> 11 #include <linux/interrupt.h> 12 #include <linux/ioport.h> 13 #include <linux/in.h> 14 #include <linux/string.h> 15 #include <linux/delay.h> 16 #include <linux/init.h> 17 #include <linux/crc32.h> 18 #include <linux/errno.h> 19 #include <linux/ethtool.h> 20 #include <linux/mii.h> 21 #include <linux/netdevice.h> 22 #include <linux/etherdevice.h> 23 #include <linux/skbuff.h> 24 #include <linux/bitops.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/of.h> 27 #include <linux/of_device.h> 28 #include <linux/gfp.h> 29 30 #include <asm/auxio.h> 31 #include <asm/byteorder.h> 32 #include <asm/dma.h> 33 #include <asm/idprom.h> 34 #include <asm/io.h> 35 #include <asm/openprom.h> 36 #include <asm/oplib.h> 37 #include <asm/pgtable.h> 38 39 #include "sunbmac.h" 40 41 #define DRV_NAME "sunbmac" 42 #define DRV_VERSION "2.1" 43 #define DRV_RELDATE "August 26, 2008" 44 #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" 45 46 static char version[] = 47 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 48 49 MODULE_VERSION(DRV_VERSION); 50 MODULE_AUTHOR(DRV_AUTHOR); 51 MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver"); 52 MODULE_LICENSE("GPL"); 53 54 #undef DEBUG_PROBE 55 #undef DEBUG_TX 56 #undef DEBUG_IRQ 57 58 #ifdef DEBUG_PROBE 59 #define DP(x) printk x 60 #else 61 #define DP(x) 62 #endif 63 64 #ifdef DEBUG_TX 65 #define DTX(x) printk x 66 #else 67 #define DTX(x) 68 #endif 69 70 #ifdef DEBUG_IRQ 71 #define DIRQ(x) printk x 72 #else 73 #define DIRQ(x) 74 #endif 75 76 #define DEFAULT_JAMSIZE 4 /* Toe jam */ 77 78 #define QEC_RESET_TRIES 200 79 80 static int qec_global_reset(void __iomem *gregs) 81 { 82 int tries = QEC_RESET_TRIES; 83 84 sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); 85 while (--tries) { 86 if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) { 87 udelay(20); 88 continue; 89 } 90 break; 91 } 92 if (tries) 93 return 0; 94 printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n"); 95 return -1; 96 } 97 98 static void qec_init(struct bigmac *bp) 99 { 100 struct platform_device *qec_op = bp->qec_op; 101 void __iomem *gregs = bp->gregs; 102 u8 bsizes = bp->bigmac_bursts; 103 u32 regval; 104 105 /* 64byte bursts do not work at the moment, do 106 * not even try to enable them. -DaveM 107 */ 108 if (bsizes & DMA_BURST32) 109 regval = GLOB_CTRL_B32; 110 else 111 regval = GLOB_CTRL_B16; 112 sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL); 113 sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE); 114 115 /* All of memsize is given to bigmac. */ 116 sbus_writel(resource_size(&qec_op->resource[1]), 117 gregs + GLOB_MSIZE); 118 119 /* Half to the transmitter, half to the receiver. */ 120 sbus_writel(resource_size(&qec_op->resource[1]) >> 1, 121 gregs + GLOB_TSIZE); 122 sbus_writel(resource_size(&qec_op->resource[1]) >> 1, 123 gregs + GLOB_RSIZE); 124 } 125 126 #define TX_RESET_TRIES 32 127 #define RX_RESET_TRIES 32 128 129 static void bigmac_tx_reset(void __iomem *bregs) 130 { 131 int tries = TX_RESET_TRIES; 132 133 sbus_writel(0, bregs + BMAC_TXCFG); 134 135 /* The fifo threshold bit is read-only and does 136 * not clear. -DaveM 137 */ 138 while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 && 139 --tries != 0) 140 udelay(20); 141 142 if (!tries) { 143 printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n"); 144 printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n", 145 sbus_readl(bregs + BMAC_TXCFG)); 146 } 147 } 148 149 static void bigmac_rx_reset(void __iomem *bregs) 150 { 151 int tries = RX_RESET_TRIES; 152 153 sbus_writel(0, bregs + BMAC_RXCFG); 154 while (sbus_readl(bregs + BMAC_RXCFG) && --tries) 155 udelay(20); 156 157 if (!tries) { 158 printk(KERN_ERR "BIGMAC: Receiver will not reset.\n"); 159 printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n", 160 sbus_readl(bregs + BMAC_RXCFG)); 161 } 162 } 163 164 /* Reset the transmitter and receiver. */ 165 static void bigmac_stop(struct bigmac *bp) 166 { 167 bigmac_tx_reset(bp->bregs); 168 bigmac_rx_reset(bp->bregs); 169 } 170 171 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) 172 { 173 struct net_device_stats *stats = &bp->enet_stats; 174 175 stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); 176 sbus_writel(0, bregs + BMAC_RCRCECTR); 177 178 stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR); 179 sbus_writel(0, bregs + BMAC_UNALECTR); 180 181 stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR); 182 sbus_writel(0, bregs + BMAC_GLECTR); 183 184 stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR); 185 186 stats->collisions += 187 (sbus_readl(bregs + BMAC_EXCTR) + 188 sbus_readl(bregs + BMAC_LTCTR)); 189 sbus_writel(0, bregs + BMAC_EXCTR); 190 sbus_writel(0, bregs + BMAC_LTCTR); 191 } 192 193 static void bigmac_clean_rings(struct bigmac *bp) 194 { 195 int i; 196 197 for (i = 0; i < RX_RING_SIZE; i++) { 198 if (bp->rx_skbs[i] != NULL) { 199 dev_kfree_skb_any(bp->rx_skbs[i]); 200 bp->rx_skbs[i] = NULL; 201 } 202 } 203 204 for (i = 0; i < TX_RING_SIZE; i++) { 205 if (bp->tx_skbs[i] != NULL) { 206 dev_kfree_skb_any(bp->tx_skbs[i]); 207 bp->tx_skbs[i] = NULL; 208 } 209 } 210 } 211 212 static void bigmac_init_rings(struct bigmac *bp, int from_irq) 213 { 214 struct bmac_init_block *bb = bp->bmac_block; 215 int i; 216 gfp_t gfp_flags = GFP_KERNEL; 217 218 if (from_irq || in_interrupt()) 219 gfp_flags = GFP_ATOMIC; 220 221 bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; 222 223 /* Free any skippy bufs left around in the rings. */ 224 bigmac_clean_rings(bp); 225 226 /* Now get new skbufs for the receive ring. */ 227 for (i = 0; i < RX_RING_SIZE; i++) { 228 struct sk_buff *skb; 229 230 skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags); 231 if (!skb) 232 continue; 233 234 bp->rx_skbs[i] = skb; 235 236 /* Because we reserve afterwards. */ 237 skb_put(skb, ETH_FRAME_LEN); 238 skb_reserve(skb, 34); 239 240 bb->be_rxd[i].rx_addr = 241 dma_map_single(&bp->bigmac_op->dev, 242 skb->data, 243 RX_BUF_ALLOC_SIZE - 34, 244 DMA_FROM_DEVICE); 245 bb->be_rxd[i].rx_flags = 246 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); 247 } 248 249 for (i = 0; i < TX_RING_SIZE; i++) 250 bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0; 251 } 252 253 #define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK) 254 #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB) 255 256 static void idle_transceiver(void __iomem *tregs) 257 { 258 int i = 20; 259 260 while (i--) { 261 sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL); 262 sbus_readl(tregs + TCVR_MPAL); 263 sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL); 264 sbus_readl(tregs + TCVR_MPAL); 265 } 266 } 267 268 static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) 269 { 270 if (bp->tcvr_type == internal) { 271 bit = (bit & 1) << 3; 272 sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO), 273 tregs + TCVR_MPAL); 274 sbus_readl(tregs + TCVR_MPAL); 275 sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, 276 tregs + TCVR_MPAL); 277 sbus_readl(tregs + TCVR_MPAL); 278 } else if (bp->tcvr_type == external) { 279 bit = (bit & 1) << 2; 280 sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB, 281 tregs + TCVR_MPAL); 282 sbus_readl(tregs + TCVR_MPAL); 283 sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK, 284 tregs + TCVR_MPAL); 285 sbus_readl(tregs + TCVR_MPAL); 286 } else { 287 printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n"); 288 } 289 } 290 291 static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) 292 { 293 int retval = 0; 294 295 if (bp->tcvr_type == internal) { 296 sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); 297 sbus_readl(tregs + TCVR_MPAL); 298 sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, 299 tregs + TCVR_MPAL); 300 sbus_readl(tregs + TCVR_MPAL); 301 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; 302 } else if (bp->tcvr_type == external) { 303 sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); 304 sbus_readl(tregs + TCVR_MPAL); 305 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); 306 sbus_readl(tregs + TCVR_MPAL); 307 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; 308 } else { 309 printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n"); 310 } 311 return retval; 312 } 313 314 static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) 315 { 316 int retval = 0; 317 318 if (bp->tcvr_type == internal) { 319 sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); 320 sbus_readl(tregs + TCVR_MPAL); 321 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; 322 sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); 323 sbus_readl(tregs + TCVR_MPAL); 324 } else if (bp->tcvr_type == external) { 325 sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); 326 sbus_readl(tregs + TCVR_MPAL); 327 retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; 328 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); 329 sbus_readl(tregs + TCVR_MPAL); 330 } else { 331 printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n"); 332 } 333 return retval; 334 } 335 336 static void put_tcvr_byte(struct bigmac *bp, 337 void __iomem *tregs, 338 unsigned int byte) 339 { 340 int shift = 4; 341 342 do { 343 write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); 344 shift -= 1; 345 } while (shift >= 0); 346 } 347 348 static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, 349 int reg, unsigned short val) 350 { 351 int shift; 352 353 reg &= 0xff; 354 val &= 0xffff; 355 switch(bp->tcvr_type) { 356 case internal: 357 case external: 358 break; 359 360 default: 361 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); 362 return; 363 } 364 365 idle_transceiver(tregs); 366 write_tcvr_bit(bp, tregs, 0); 367 write_tcvr_bit(bp, tregs, 1); 368 write_tcvr_bit(bp, tregs, 0); 369 write_tcvr_bit(bp, tregs, 1); 370 371 put_tcvr_byte(bp, tregs, 372 ((bp->tcvr_type == internal) ? 373 BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); 374 375 put_tcvr_byte(bp, tregs, reg); 376 377 write_tcvr_bit(bp, tregs, 1); 378 write_tcvr_bit(bp, tregs, 0); 379 380 shift = 15; 381 do { 382 write_tcvr_bit(bp, tregs, (val >> shift) & 1); 383 shift -= 1; 384 } while (shift >= 0); 385 } 386 387 static unsigned short bigmac_tcvr_read(struct bigmac *bp, 388 void __iomem *tregs, 389 int reg) 390 { 391 unsigned short retval = 0; 392 393 reg &= 0xff; 394 switch(bp->tcvr_type) { 395 case internal: 396 case external: 397 break; 398 399 default: 400 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); 401 return 0xffff; 402 } 403 404 idle_transceiver(tregs); 405 write_tcvr_bit(bp, tregs, 0); 406 write_tcvr_bit(bp, tregs, 1); 407 write_tcvr_bit(bp, tregs, 1); 408 write_tcvr_bit(bp, tregs, 0); 409 410 put_tcvr_byte(bp, tregs, 411 ((bp->tcvr_type == internal) ? 412 BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); 413 414 put_tcvr_byte(bp, tregs, reg); 415 416 if (bp->tcvr_type == external) { 417 int shift = 15; 418 419 (void) read_tcvr_bit2(bp, tregs); 420 (void) read_tcvr_bit2(bp, tregs); 421 422 do { 423 int tmp; 424 425 tmp = read_tcvr_bit2(bp, tregs); 426 retval |= ((tmp & 1) << shift); 427 shift -= 1; 428 } while (shift >= 0); 429 430 (void) read_tcvr_bit2(bp, tregs); 431 (void) read_tcvr_bit2(bp, tregs); 432 (void) read_tcvr_bit2(bp, tregs); 433 } else { 434 int shift = 15; 435 436 (void) read_tcvr_bit(bp, tregs); 437 (void) read_tcvr_bit(bp, tregs); 438 439 do { 440 int tmp; 441 442 tmp = read_tcvr_bit(bp, tregs); 443 retval |= ((tmp & 1) << shift); 444 shift -= 1; 445 } while (shift >= 0); 446 447 (void) read_tcvr_bit(bp, tregs); 448 (void) read_tcvr_bit(bp, tregs); 449 (void) read_tcvr_bit(bp, tregs); 450 } 451 return retval; 452 } 453 454 static void bigmac_tcvr_init(struct bigmac *bp) 455 { 456 void __iomem *tregs = bp->tregs; 457 u32 mpal; 458 459 idle_transceiver(tregs); 460 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, 461 tregs + TCVR_MPAL); 462 sbus_readl(tregs + TCVR_MPAL); 463 464 /* Only the bit for the present transceiver (internal or 465 * external) will stick, set them both and see what stays. 466 */ 467 sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); 468 sbus_readl(tregs + TCVR_MPAL); 469 udelay(20); 470 471 mpal = sbus_readl(tregs + TCVR_MPAL); 472 if (mpal & MGMT_PAL_EXT_MDIO) { 473 bp->tcvr_type = external; 474 sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), 475 tregs + TCVR_TPAL); 476 sbus_readl(tregs + TCVR_TPAL); 477 } else if (mpal & MGMT_PAL_INT_MDIO) { 478 bp->tcvr_type = internal; 479 sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK | 480 TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), 481 tregs + TCVR_TPAL); 482 sbus_readl(tregs + TCVR_TPAL); 483 } else { 484 printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor " 485 "external MDIO available!\n"); 486 printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n", 487 sbus_readl(tregs + TCVR_MPAL), 488 sbus_readl(tregs + TCVR_TPAL)); 489 } 490 } 491 492 static int bigmac_init_hw(struct bigmac *, int); 493 494 static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) 495 { 496 if (bp->sw_bmcr & BMCR_SPEED100) { 497 int timeout; 498 499 /* Reset the PHY. */ 500 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); 501 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 502 bp->sw_bmcr = (BMCR_RESET); 503 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 504 505 timeout = 64; 506 while (--timeout) { 507 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 508 if ((bp->sw_bmcr & BMCR_RESET) == 0) 509 break; 510 udelay(20); 511 } 512 if (timeout == 0) 513 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); 514 515 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 516 517 /* Now we try 10baseT. */ 518 bp->sw_bmcr &= ~(BMCR_SPEED100); 519 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 520 return 0; 521 } 522 523 /* We've tried them all. */ 524 return -1; 525 } 526 527 static void bigmac_timer(unsigned long data) 528 { 529 struct bigmac *bp = (struct bigmac *) data; 530 void __iomem *tregs = bp->tregs; 531 int restart_timer = 0; 532 533 bp->timer_ticks++; 534 if (bp->timer_state == ltrywait) { 535 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); 536 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 537 if (bp->sw_bmsr & BMSR_LSTATUS) { 538 printk(KERN_INFO "%s: Link is now up at %s.\n", 539 bp->dev->name, 540 (bp->sw_bmcr & BMCR_SPEED100) ? 541 "100baseT" : "10baseT"); 542 bp->timer_state = asleep; 543 restart_timer = 0; 544 } else { 545 if (bp->timer_ticks >= 4) { 546 int ret; 547 548 ret = try_next_permutation(bp, tregs); 549 if (ret == -1) { 550 printk(KERN_ERR "%s: Link down, cable problem?\n", 551 bp->dev->name); 552 ret = bigmac_init_hw(bp, 0); 553 if (ret) { 554 printk(KERN_ERR "%s: Error, cannot re-init the " 555 "BigMAC.\n", bp->dev->name); 556 } 557 return; 558 } 559 bp->timer_ticks = 0; 560 restart_timer = 1; 561 } else { 562 restart_timer = 1; 563 } 564 } 565 } else { 566 /* Can't happens.... */ 567 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", 568 bp->dev->name); 569 restart_timer = 0; 570 bp->timer_ticks = 0; 571 bp->timer_state = asleep; /* foo on you */ 572 } 573 574 if (restart_timer != 0) { 575 bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ 576 add_timer(&bp->bigmac_timer); 577 } 578 } 579 580 /* Well, really we just force the chip into 100baseT then 581 * 10baseT, each time checking for a link status. 582 */ 583 static void bigmac_begin_auto_negotiation(struct bigmac *bp) 584 { 585 void __iomem *tregs = bp->tregs; 586 int timeout; 587 588 /* Grab new software copies of PHY registers. */ 589 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); 590 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 591 592 /* Reset the PHY. */ 593 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); 594 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 595 bp->sw_bmcr = (BMCR_RESET); 596 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 597 598 timeout = 64; 599 while (--timeout) { 600 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 601 if ((bp->sw_bmcr & BMCR_RESET) == 0) 602 break; 603 udelay(20); 604 } 605 if (timeout == 0) 606 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); 607 608 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 609 610 /* First we try 100baseT. */ 611 bp->sw_bmcr |= BMCR_SPEED100; 612 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); 613 614 bp->timer_state = ltrywait; 615 bp->timer_ticks = 0; 616 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; 617 bp->bigmac_timer.data = (unsigned long) bp; 618 bp->bigmac_timer.function = bigmac_timer; 619 add_timer(&bp->bigmac_timer); 620 } 621 622 static int bigmac_init_hw(struct bigmac *bp, int from_irq) 623 { 624 void __iomem *gregs = bp->gregs; 625 void __iomem *cregs = bp->creg; 626 void __iomem *bregs = bp->bregs; 627 unsigned char *e = &bp->dev->dev_addr[0]; 628 629 /* Latch current counters into statistics. */ 630 bigmac_get_counters(bp, bregs); 631 632 /* Reset QEC. */ 633 qec_global_reset(gregs); 634 635 /* Init QEC. */ 636 qec_init(bp); 637 638 /* Alloc and reset the tx/rx descriptor chains. */ 639 bigmac_init_rings(bp, from_irq); 640 641 /* Initialize the PHY. */ 642 bigmac_tcvr_init(bp); 643 644 /* Stop transmitter and receiver. */ 645 bigmac_stop(bp); 646 647 /* Set hardware ethernet address. */ 648 sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2); 649 sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1); 650 sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0); 651 652 /* Clear the hash table until mc upload occurs. */ 653 sbus_writel(0, bregs + BMAC_HTABLE3); 654 sbus_writel(0, bregs + BMAC_HTABLE2); 655 sbus_writel(0, bregs + BMAC_HTABLE1); 656 sbus_writel(0, bregs + BMAC_HTABLE0); 657 658 /* Enable Big Mac hash table filter. */ 659 sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO, 660 bregs + BMAC_RXCFG); 661 udelay(20); 662 663 /* Ok, configure the Big Mac transmitter. */ 664 sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG); 665 666 /* The HME docs recommend to use the 10LSB of our MAC here. */ 667 sbus_writel(((e[5] | e[4] << 8) & 0x3ff), 668 bregs + BMAC_RSEED); 669 670 /* Enable the output drivers no matter what. */ 671 sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV, 672 bregs + BMAC_XIFCFG); 673 674 /* Tell the QEC where the ring descriptors are. */ 675 sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), 676 cregs + CREG_RXDS); 677 sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), 678 cregs + CREG_TXDS); 679 680 /* Setup the FIFO pointers into QEC local memory. */ 681 sbus_writel(0, cregs + CREG_RXRBUFPTR); 682 sbus_writel(0, cregs + CREG_RXWBUFPTR); 683 sbus_writel(sbus_readl(gregs + GLOB_RSIZE), 684 cregs + CREG_TXRBUFPTR); 685 sbus_writel(sbus_readl(gregs + GLOB_RSIZE), 686 cregs + CREG_TXWBUFPTR); 687 688 /* Tell bigmac what interrupts we don't want to hear about. */ 689 sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME, 690 bregs + BMAC_IMASK); 691 692 /* Enable the various other irq's. */ 693 sbus_writel(0, cregs + CREG_RIMASK); 694 sbus_writel(0, cregs + CREG_TIMASK); 695 sbus_writel(0, cregs + CREG_QMASK); 696 sbus_writel(0, cregs + CREG_BMASK); 697 698 /* Set jam size to a reasonable default. */ 699 sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE); 700 701 /* Clear collision counter. */ 702 sbus_writel(0, cregs + CREG_CCNT); 703 704 /* Enable transmitter and receiver. */ 705 sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE, 706 bregs + BMAC_TXCFG); 707 sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE, 708 bregs + BMAC_RXCFG); 709 710 /* Ok, start detecting link speed/duplex. */ 711 bigmac_begin_auto_negotiation(bp); 712 713 /* Success. */ 714 return 0; 715 } 716 717 /* Error interrupts get sent here. */ 718 static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) 719 { 720 printk(KERN_ERR "bigmac_is_medium_rare: "); 721 if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { 722 if (qec_status & GLOB_STAT_ER) 723 printk("QEC_ERROR, "); 724 if (qec_status & GLOB_STAT_BM) 725 printk("QEC_BMAC_ERROR, "); 726 } 727 if (bmac_status & CREG_STAT_ERRORS) { 728 if (bmac_status & CREG_STAT_BERROR) 729 printk("BMAC_ERROR, "); 730 if (bmac_status & CREG_STAT_TXDERROR) 731 printk("TXD_ERROR, "); 732 if (bmac_status & CREG_STAT_TXLERR) 733 printk("TX_LATE_ERROR, "); 734 if (bmac_status & CREG_STAT_TXPERR) 735 printk("TX_PARITY_ERROR, "); 736 if (bmac_status & CREG_STAT_TXSERR) 737 printk("TX_SBUS_ERROR, "); 738 739 if (bmac_status & CREG_STAT_RXDROP) 740 printk("RX_DROP_ERROR, "); 741 742 if (bmac_status & CREG_STAT_RXSMALL) 743 printk("RX_SMALL_ERROR, "); 744 if (bmac_status & CREG_STAT_RXLERR) 745 printk("RX_LATE_ERROR, "); 746 if (bmac_status & CREG_STAT_RXPERR) 747 printk("RX_PARITY_ERROR, "); 748 if (bmac_status & CREG_STAT_RXSERR) 749 printk("RX_SBUS_ERROR, "); 750 } 751 752 printk(" RESET\n"); 753 bigmac_init_hw(bp, 1); 754 } 755 756 /* BigMAC transmit complete service routines. */ 757 static void bigmac_tx(struct bigmac *bp) 758 { 759 struct be_txd *txbase = &bp->bmac_block->be_txd[0]; 760 struct net_device *dev = bp->dev; 761 int elem; 762 763 spin_lock(&bp->lock); 764 765 elem = bp->tx_old; 766 DTX(("bigmac_tx: tx_old[%d] ", elem)); 767 while (elem != bp->tx_new) { 768 struct sk_buff *skb; 769 struct be_txd *this = &txbase[elem]; 770 771 DTX(("this(%p) [flags(%08x)addr(%08x)]", 772 this, this->tx_flags, this->tx_addr)); 773 774 if (this->tx_flags & TXD_OWN) 775 break; 776 skb = bp->tx_skbs[elem]; 777 bp->enet_stats.tx_packets++; 778 bp->enet_stats.tx_bytes += skb->len; 779 dma_unmap_single(&bp->bigmac_op->dev, 780 this->tx_addr, skb->len, 781 DMA_TO_DEVICE); 782 783 DTX(("skb(%p) ", skb)); 784 bp->tx_skbs[elem] = NULL; 785 dev_kfree_skb_irq(skb); 786 787 elem = NEXT_TX(elem); 788 } 789 DTX((" DONE, tx_old=%d\n", elem)); 790 bp->tx_old = elem; 791 792 if (netif_queue_stopped(dev) && 793 TX_BUFFS_AVAIL(bp) > 0) 794 netif_wake_queue(bp->dev); 795 796 spin_unlock(&bp->lock); 797 } 798 799 /* BigMAC receive complete service routines. */ 800 static void bigmac_rx(struct bigmac *bp) 801 { 802 struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; 803 struct be_rxd *this; 804 int elem = bp->rx_new, drops = 0; 805 u32 flags; 806 807 this = &rxbase[elem]; 808 while (!((flags = this->rx_flags) & RXD_OWN)) { 809 struct sk_buff *skb; 810 int len = (flags & RXD_LENGTH); /* FCS not included */ 811 812 /* Check for errors. */ 813 if (len < ETH_ZLEN) { 814 bp->enet_stats.rx_errors++; 815 bp->enet_stats.rx_length_errors++; 816 817 drop_it: 818 /* Return it to the BigMAC. */ 819 bp->enet_stats.rx_dropped++; 820 this->rx_flags = 821 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); 822 goto next; 823 } 824 skb = bp->rx_skbs[elem]; 825 if (len > RX_COPY_THRESHOLD) { 826 struct sk_buff *new_skb; 827 828 /* Now refill the entry, if we can. */ 829 new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 830 if (new_skb == NULL) { 831 drops++; 832 goto drop_it; 833 } 834 dma_unmap_single(&bp->bigmac_op->dev, 835 this->rx_addr, 836 RX_BUF_ALLOC_SIZE - 34, 837 DMA_FROM_DEVICE); 838 bp->rx_skbs[elem] = new_skb; 839 skb_put(new_skb, ETH_FRAME_LEN); 840 skb_reserve(new_skb, 34); 841 this->rx_addr = 842 dma_map_single(&bp->bigmac_op->dev, 843 new_skb->data, 844 RX_BUF_ALLOC_SIZE - 34, 845 DMA_FROM_DEVICE); 846 this->rx_flags = 847 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); 848 849 /* Trim the original skb for the netif. */ 850 skb_trim(skb, len); 851 } else { 852 struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2); 853 854 if (copy_skb == NULL) { 855 drops++; 856 goto drop_it; 857 } 858 skb_reserve(copy_skb, 2); 859 skb_put(copy_skb, len); 860 dma_sync_single_for_cpu(&bp->bigmac_op->dev, 861 this->rx_addr, len, 862 DMA_FROM_DEVICE); 863 skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); 864 dma_sync_single_for_device(&bp->bigmac_op->dev, 865 this->rx_addr, len, 866 DMA_FROM_DEVICE); 867 868 /* Reuse original ring buffer. */ 869 this->rx_flags = 870 (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); 871 872 skb = copy_skb; 873 } 874 875 /* No checksums done by the BigMAC ;-( */ 876 skb->protocol = eth_type_trans(skb, bp->dev); 877 netif_rx(skb); 878 bp->enet_stats.rx_packets++; 879 bp->enet_stats.rx_bytes += len; 880 next: 881 elem = NEXT_RX(elem); 882 this = &rxbase[elem]; 883 } 884 bp->rx_new = elem; 885 if (drops) 886 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); 887 } 888 889 static irqreturn_t bigmac_interrupt(int irq, void *dev_id) 890 { 891 struct bigmac *bp = (struct bigmac *) dev_id; 892 u32 qec_status, bmac_status; 893 894 DIRQ(("bigmac_interrupt: ")); 895 896 /* Latch status registers now. */ 897 bmac_status = sbus_readl(bp->creg + CREG_STAT); 898 qec_status = sbus_readl(bp->gregs + GLOB_STAT); 899 900 DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); 901 if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || 902 (bmac_status & CREG_STAT_ERRORS)) 903 bigmac_is_medium_rare(bp, qec_status, bmac_status); 904 905 if (bmac_status & CREG_STAT_TXIRQ) 906 bigmac_tx(bp); 907 908 if (bmac_status & CREG_STAT_RXIRQ) 909 bigmac_rx(bp); 910 911 return IRQ_HANDLED; 912 } 913 914 static int bigmac_open(struct net_device *dev) 915 { 916 struct bigmac *bp = netdev_priv(dev); 917 int ret; 918 919 ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); 920 if (ret) { 921 printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); 922 return ret; 923 } 924 init_timer(&bp->bigmac_timer); 925 ret = bigmac_init_hw(bp, 0); 926 if (ret) 927 free_irq(dev->irq, bp); 928 return ret; 929 } 930 931 static int bigmac_close(struct net_device *dev) 932 { 933 struct bigmac *bp = netdev_priv(dev); 934 935 del_timer(&bp->bigmac_timer); 936 bp->timer_state = asleep; 937 bp->timer_ticks = 0; 938 939 bigmac_stop(bp); 940 bigmac_clean_rings(bp); 941 free_irq(dev->irq, bp); 942 return 0; 943 } 944 945 static void bigmac_tx_timeout(struct net_device *dev) 946 { 947 struct bigmac *bp = netdev_priv(dev); 948 949 bigmac_init_hw(bp, 0); 950 netif_wake_queue(dev); 951 } 952 953 /* Put a packet on the wire. */ 954 static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 955 { 956 struct bigmac *bp = netdev_priv(dev); 957 int len, entry; 958 u32 mapping; 959 960 len = skb->len; 961 mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, 962 len, DMA_TO_DEVICE); 963 964 /* Avoid a race... */ 965 spin_lock_irq(&bp->lock); 966 entry = bp->tx_new; 967 DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); 968 bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; 969 bp->tx_skbs[entry] = skb; 970 bp->bmac_block->be_txd[entry].tx_addr = mapping; 971 bp->bmac_block->be_txd[entry].tx_flags = 972 (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); 973 bp->tx_new = NEXT_TX(entry); 974 if (TX_BUFFS_AVAIL(bp) <= 0) 975 netif_stop_queue(dev); 976 spin_unlock_irq(&bp->lock); 977 978 /* Get it going. */ 979 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); 980 981 982 return NETDEV_TX_OK; 983 } 984 985 static struct net_device_stats *bigmac_get_stats(struct net_device *dev) 986 { 987 struct bigmac *bp = netdev_priv(dev); 988 989 bigmac_get_counters(bp, bp->bregs); 990 return &bp->enet_stats; 991 } 992 993 static void bigmac_set_multicast(struct net_device *dev) 994 { 995 struct bigmac *bp = netdev_priv(dev); 996 void __iomem *bregs = bp->bregs; 997 struct netdev_hw_addr *ha; 998 u32 tmp, crc; 999 1000 /* Disable the receiver. The bit self-clears when 1001 * the operation is complete. 1002 */ 1003 tmp = sbus_readl(bregs + BMAC_RXCFG); 1004 tmp &= ~(BIGMAC_RXCFG_ENABLE); 1005 sbus_writel(tmp, bregs + BMAC_RXCFG); 1006 while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) 1007 udelay(20); 1008 1009 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 1010 sbus_writel(0xffff, bregs + BMAC_HTABLE0); 1011 sbus_writel(0xffff, bregs + BMAC_HTABLE1); 1012 sbus_writel(0xffff, bregs + BMAC_HTABLE2); 1013 sbus_writel(0xffff, bregs + BMAC_HTABLE3); 1014 } else if (dev->flags & IFF_PROMISC) { 1015 tmp = sbus_readl(bregs + BMAC_RXCFG); 1016 tmp |= BIGMAC_RXCFG_PMISC; 1017 sbus_writel(tmp, bregs + BMAC_RXCFG); 1018 } else { 1019 u16 hash_table[4] = { 0 }; 1020 1021 netdev_for_each_mc_addr(ha, dev) { 1022 crc = ether_crc_le(6, ha->addr); 1023 crc >>= 26; 1024 hash_table[crc >> 4] |= 1 << (crc & 0xf); 1025 } 1026 sbus_writel(hash_table[0], bregs + BMAC_HTABLE0); 1027 sbus_writel(hash_table[1], bregs + BMAC_HTABLE1); 1028 sbus_writel(hash_table[2], bregs + BMAC_HTABLE2); 1029 sbus_writel(hash_table[3], bregs + BMAC_HTABLE3); 1030 } 1031 1032 /* Re-enable the receiver. */ 1033 tmp = sbus_readl(bregs + BMAC_RXCFG); 1034 tmp |= BIGMAC_RXCFG_ENABLE; 1035 sbus_writel(tmp, bregs + BMAC_RXCFG); 1036 } 1037 1038 /* Ethtool support... */ 1039 static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1040 { 1041 strlcpy(info->driver, "sunbmac", sizeof(info->driver)); 1042 strlcpy(info->version, "2.0", sizeof(info->version)); 1043 } 1044 1045 static u32 bigmac_get_link(struct net_device *dev) 1046 { 1047 struct bigmac *bp = netdev_priv(dev); 1048 1049 spin_lock_irq(&bp->lock); 1050 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); 1051 spin_unlock_irq(&bp->lock); 1052 1053 return (bp->sw_bmsr & BMSR_LSTATUS); 1054 } 1055 1056 static const struct ethtool_ops bigmac_ethtool_ops = { 1057 .get_drvinfo = bigmac_get_drvinfo, 1058 .get_link = bigmac_get_link, 1059 }; 1060 1061 static const struct net_device_ops bigmac_ops = { 1062 .ndo_open = bigmac_open, 1063 .ndo_stop = bigmac_close, 1064 .ndo_start_xmit = bigmac_start_xmit, 1065 .ndo_get_stats = bigmac_get_stats, 1066 .ndo_set_rx_mode = bigmac_set_multicast, 1067 .ndo_tx_timeout = bigmac_tx_timeout, 1068 .ndo_change_mtu = eth_change_mtu, 1069 .ndo_set_mac_address = eth_mac_addr, 1070 .ndo_validate_addr = eth_validate_addr, 1071 }; 1072 1073 static int bigmac_ether_init(struct platform_device *op, 1074 struct platform_device *qec_op) 1075 { 1076 static int version_printed; 1077 struct net_device *dev; 1078 u8 bsizes, bsizes_more; 1079 struct bigmac *bp; 1080 int i; 1081 1082 /* Get a new device struct for this interface. */ 1083 dev = alloc_etherdev(sizeof(struct bigmac)); 1084 if (!dev) 1085 return -ENOMEM; 1086 1087 if (version_printed++ == 0) 1088 printk(KERN_INFO "%s", version); 1089 1090 for (i = 0; i < 6; i++) 1091 dev->dev_addr[i] = idprom->id_ethaddr[i]; 1092 1093 /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ 1094 bp = netdev_priv(dev); 1095 bp->qec_op = qec_op; 1096 bp->bigmac_op = op; 1097 1098 SET_NETDEV_DEV(dev, &op->dev); 1099 1100 spin_lock_init(&bp->lock); 1101 1102 /* Map in QEC global control registers. */ 1103 bp->gregs = of_ioremap(&qec_op->resource[0], 0, 1104 GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); 1105 if (!bp->gregs) { 1106 printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); 1107 goto fail_and_cleanup; 1108 } 1109 1110 /* Make sure QEC is in BigMAC mode. */ 1111 if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { 1112 printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); 1113 goto fail_and_cleanup; 1114 } 1115 1116 /* Reset the QEC. */ 1117 if (qec_global_reset(bp->gregs)) 1118 goto fail_and_cleanup; 1119 1120 /* Get supported SBUS burst sizes. */ 1121 bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); 1122 bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); 1123 1124 bsizes &= 0xff; 1125 if (bsizes_more != 0xff) 1126 bsizes &= bsizes_more; 1127 if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || 1128 (bsizes & DMA_BURST32) == 0) 1129 bsizes = (DMA_BURST32 - 1); 1130 bp->bigmac_bursts = bsizes; 1131 1132 /* Perform QEC initialization. */ 1133 qec_init(bp); 1134 1135 /* Map in the BigMAC channel registers. */ 1136 bp->creg = of_ioremap(&op->resource[0], 0, 1137 CREG_REG_SIZE, "BigMAC QEC Channel Regs"); 1138 if (!bp->creg) { 1139 printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); 1140 goto fail_and_cleanup; 1141 } 1142 1143 /* Map in the BigMAC control registers. */ 1144 bp->bregs = of_ioremap(&op->resource[1], 0, 1145 BMAC_REG_SIZE, "BigMAC Primary Regs"); 1146 if (!bp->bregs) { 1147 printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); 1148 goto fail_and_cleanup; 1149 } 1150 1151 /* Map in the BigMAC transceiver registers, this is how you poke at 1152 * the BigMAC's PHY. 1153 */ 1154 bp->tregs = of_ioremap(&op->resource[2], 0, 1155 TCVR_REG_SIZE, "BigMAC Transceiver Regs"); 1156 if (!bp->tregs) { 1157 printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); 1158 goto fail_and_cleanup; 1159 } 1160 1161 /* Stop the BigMAC. */ 1162 bigmac_stop(bp); 1163 1164 /* Allocate transmit/receive descriptor DVMA block. */ 1165 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, 1166 PAGE_SIZE, 1167 &bp->bblock_dvma, GFP_ATOMIC); 1168 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) 1169 goto fail_and_cleanup; 1170 1171 /* Get the board revision of this BigMAC. */ 1172 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, 1173 "board-version", 1); 1174 1175 /* Init auto-negotiation timer state. */ 1176 init_timer(&bp->bigmac_timer); 1177 bp->timer_state = asleep; 1178 bp->timer_ticks = 0; 1179 1180 /* Backlink to generic net device struct. */ 1181 bp->dev = dev; 1182 1183 /* Set links to our BigMAC open and close routines. */ 1184 dev->ethtool_ops = &bigmac_ethtool_ops; 1185 dev->netdev_ops = &bigmac_ops; 1186 dev->watchdog_timeo = 5*HZ; 1187 1188 /* Finish net device registration. */ 1189 dev->irq = bp->bigmac_op->archdata.irqs[0]; 1190 dev->dma = 0; 1191 1192 if (register_netdev(dev)) { 1193 printk(KERN_ERR "BIGMAC: Cannot register device.\n"); 1194 goto fail_and_cleanup; 1195 } 1196 1197 dev_set_drvdata(&bp->bigmac_op->dev, bp); 1198 1199 printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", 1200 dev->name, dev->dev_addr); 1201 1202 return 0; 1203 1204 fail_and_cleanup: 1205 /* Something went wrong, undo whatever we did so far. */ 1206 /* Free register mappings if any. */ 1207 if (bp->gregs) 1208 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); 1209 if (bp->creg) 1210 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); 1211 if (bp->bregs) 1212 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); 1213 if (bp->tregs) 1214 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); 1215 1216 if (bp->bmac_block) 1217 dma_free_coherent(&bp->bigmac_op->dev, 1218 PAGE_SIZE, 1219 bp->bmac_block, 1220 bp->bblock_dvma); 1221 1222 /* This also frees the co-located private data */ 1223 free_netdev(dev); 1224 return -ENODEV; 1225 } 1226 1227 /* QEC can be the parent of either QuadEthernet or a BigMAC. We want 1228 * the latter. 1229 */ 1230 static int bigmac_sbus_probe(struct platform_device *op) 1231 { 1232 struct device *parent = op->dev.parent; 1233 struct platform_device *qec_op; 1234 1235 qec_op = to_platform_device(parent); 1236 1237 return bigmac_ether_init(op, qec_op); 1238 } 1239 1240 static int bigmac_sbus_remove(struct platform_device *op) 1241 { 1242 struct bigmac *bp = platform_get_drvdata(op); 1243 struct device *parent = op->dev.parent; 1244 struct net_device *net_dev = bp->dev; 1245 struct platform_device *qec_op; 1246 1247 qec_op = to_platform_device(parent); 1248 1249 unregister_netdev(net_dev); 1250 1251 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); 1252 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); 1253 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); 1254 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); 1255 dma_free_coherent(&op->dev, 1256 PAGE_SIZE, 1257 bp->bmac_block, 1258 bp->bblock_dvma); 1259 1260 free_netdev(net_dev); 1261 1262 return 0; 1263 } 1264 1265 static const struct of_device_id bigmac_sbus_match[] = { 1266 { 1267 .name = "be", 1268 }, 1269 {}, 1270 }; 1271 1272 MODULE_DEVICE_TABLE(of, bigmac_sbus_match); 1273 1274 static struct platform_driver bigmac_sbus_driver = { 1275 .driver = { 1276 .name = "sunbmac", 1277 .owner = THIS_MODULE, 1278 .of_match_table = bigmac_sbus_match, 1279 }, 1280 .probe = bigmac_sbus_probe, 1281 .remove = bigmac_sbus_remove, 1282 }; 1283 1284 module_platform_driver(bigmac_sbus_driver); 1285