/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");

/* register offsets */
#define	MODER		0x00
#define	INT_SOURCE	0x04
#define	INT_MASK	0x08
#define	IPGT		0x0c
#define	IPGR1		0x10
#define	IPGR2		0x14
#define	PACKETLEN	0x18
#define	COLLCONF	0x1c
#define	TX_BD_NUM	0x20
#define	CTRLMODER	0x24
#define	MIIMODER	0x28
#define	MIICOMMAND	0x2c
#define	MIIADDRESS	0x30
#define	MIITX_DATA	0x34
#define	MIIRX_DATA	0x38
#define	MIISTATUS	0x3c
#define	MAC_ADDR0	0x40
#define	MAC_ADDR1	0x44
#define	ETH_HASH0	0x48
#define	ETH_HASH1	0x4c
#define	ETH_TXCTRL	0x50

/* mode register */
#define	MODER_RXEN	(1 << 0)	/* receive enable */
#define	MODER_TXEN	(1 << 1)	/* transmit enable */
#define	MODER_NOPRE	(1 << 2)	/* no preamble */
#define	MODER_BRO	(1 << 3)	/* reject broadcast frames */
#define	MODER_IAM	(1 << 4)	/* individual address mode */
#define	MODER_PRO	(1 << 5)	/* promiscuous mode */
#define	MODER_IFG	(1 << 6)	/* interframe gap for incoming frames */
#define	MODER_LOOP	(1 << 7)	/* loopback */
#define	MODER_NBO	(1 << 8)	/* no back-off */
#define	MODER_EDE	(1 << 9)	/* excess defer enable */
#define	MODER_FULLD	(1 << 10)	/* full duplex */
#define	MODER_RESET	(1 << 11)	/* FIXME: reset (undocumented) */
#define	MODER_DCRC	(1 << 12)	/* delayed CRC enable */
#define	MODER_CRC	(1 << 13)	/* CRC enable */
#define	MODER_HUGE	(1 << 14)	/* huge packets enable */
#define	MODER_PAD	(1 << 15)	/* padding enabled */
#define	MODER_RSM	(1 << 16)	/* receive small packets */

/* interrupt source and mask registers */
#define	INT_MASK_TXF	(1 << 0)	/* transmit frame */
#define	INT_MASK_TXE	(1 << 1)	/* transmit error */
#define	INT_MASK_RXF	(1 << 2)	/* receive frame */
#define	INT_MASK_RXE	(1 << 3)	/* receive error */
#define	INT_MASK_BUSY	(1 << 4)	/* busy (frame dropped) */
#define	INT_MASK_TXC	(1 << 5)	/* transmit control frame */
#define	INT_MASK_RXC	(1 << 6)	/* receive control frame */

#define	INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define	INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define	INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define	PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define	PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define	PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register */
#define	TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)
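/*
 * Note: the MAC's descriptor RAM provides at most 128 buffer descriptors
 * in total (see ETHOC_BD_BASE and ethoc_probe() below), which is why
 * TX_BD_NUM_VAL() clamps the requested number of TX descriptors to 0x80.
 */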

/* control module mode register */
#define	CTRLMODER_PASSALL	(1 << 0)	/* pass all receive frames */
#define	CTRLMODER_RXFLOW	(1 << 1)	/* receive control flow */
#define	CTRLMODER_TXFLOW	(1 << 2)	/* transmit control flow */

/* MII mode register */
#define	MIIMODER_CLKDIV(x)	((x) & 0xfe)	/* needs to be an even number */
#define	MIIMODER_NOPRE		(1 << 8)	/* no preamble */

/* MII command register */
#define	MIICOMMAND_SCAN		(1 << 0)	/* scan status */
#define	MIICOMMAND_READ		(1 << 1)	/* read status */
#define	MIICOMMAND_WRITE	(1 << 2)	/* write control data */

/* MII address register */
#define	MIIADDRESS_FIAD(x)	(((x) & 0x1f) << 0)
#define	MIIADDRESS_RGAD(x)	(((x) & 0x1f) << 8)
#define	MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define	MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define	MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define	MIISTATUS_LINKFAIL	(1 << 0)
#define	MIISTATUS_BUSY		(1 << 1)
#define	MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define	TX_BD_CS		(1 << 0)	/* carrier sense lost */
#define	TX_BD_DF		(1 << 1)	/* defer indication */
#define	TX_BD_LC		(1 << 2)	/* late collision */
#define	TX_BD_RL		(1 << 3)	/* retransmission limit */
#define	TX_BD_RETRY_MASK	(0x00f0)
#define	TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define	TX_BD_UR		(1 << 8)	/* transmitter underrun */
#define	TX_BD_CRC		(1 << 11)	/* TX CRC enable */
#define	TX_BD_PAD		(1 << 12)	/* pad enable for short packets */
#define	TX_BD_WRAP		(1 << 13)
#define	TX_BD_IRQ		(1 << 14)	/* interrupt request enable */
#define	TX_BD_READY		(1 << 15)	/* TX buffer ready */
#define	TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define	TX_BD_LEN_MASK		(0xffff << 16)

#define	TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define	RX_BD_LC	(1 << 0)	/* late collision */
#define	RX_BD_CRC	(1 << 1)	/* RX CRC error */
#define	RX_BD_SF	(1 << 2)	/* short frame */
#define	RX_BD_TL	(1 << 3)	/* too long */
#define	RX_BD_DN	(1 << 4)	/* dribble nibble */
#define	RX_BD_IS	(1 << 5)	/* invalid symbol */
#define	RX_BD_OR	(1 << 6)	/* receiver overrun */
#define	RX_BD_MISS	(1 << 7)
#define	RX_BD_CF	(1 << 8)	/* control frame */
#define	RX_BD_WRAP	(1 << 13)
#define	RX_BD_IRQ	(1 << 14)	/* interrupt request enable */
#define	RX_BD_EMPTY	(1 << 15)
#define	RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define	RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define	ETHOC_BUFSIZ		1536
#define	ETHOC_ZLEN		64
#define	ETHOC_BD_BASE		0x400
#define	ETHOC_TIMEOUT		(HZ / 2)
#define	ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
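/*
 * Buffer descriptors are not held in system memory but in the MAC's own
 * register space: descriptor i occupies the two 32-bit words starting at
 * ETHOC_BD_BASE + i * sizeof(struct ethoc_bd), which is how
 * ethoc_read_bd() and ethoc_write_bd() below address them.
 */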

/**
 * struct ethoc - driver-private device structure
 * @iobase:	pointer to I/O memory region
 * @membase:	pointer to buffer memory region
 * @dma_alloc:	dma allocated buffer size
 * @io_region_size:	I/O memory region size
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 * @vma:	pointer to array of virtual memory addresses for buffers
 * @netdev:	pointer to network device structure
 * @napi:	NAPI structure
 * @msg_enable:	device state flags
 * @lock:	device lock
 * @phy:	attached PHY
 * @mdio:	MDIO bus for PHY access
 * @phy_id:	address of attached PHY
 */
struct ethoc {
	void __iomem *iobase;
	void __iomem *membase;
	int dma_alloc;
	resource_size_t io_region_size;

	unsigned int num_tx;
	unsigned int cur_tx;
	unsigned int dty_tx;

	unsigned int num_rx;
	unsigned int cur_rx;

	void **vma;

	struct net_device *netdev;
	struct napi_struct napi;
	u32 msg_enable;

	spinlock_t lock;

	struct phy_device *phy;
	struct mii_bus *mdio;
	s8 phy_id;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:	buffer statistics
 * @addr:	physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
	return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	iowrite32(data, dev->iobase + offset);
}

static inline void ethoc_read_bd(struct ethoc *dev, int index,
		struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(dev, offset + 0);
	bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
		const struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(dev, offset + 0, bd->stat);
	ethoc_write(dev, offset + 4, bd->addr);
}

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask |= mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask &= ~mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(dev, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void *vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}
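/*
 * Ring layout established by ethoc_init_ring() above: descriptors
 * 0..num_tx-1 form the TX ring and descriptors num_tx..num_tx+num_rx-1
 * the RX ring. Packet buffers are carved out of the buffer memory in
 * ETHOC_BUFSIZ strides in the same order, and the WRAP bit marks the
 * last descriptor of each ring.
 */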

static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);
	ethoc_write(dev, IPGT, 0x15);

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
		struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		netdev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		netdev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		netdev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		netdev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		netdev->stats.collisions++;
		ret++;
	}

	return ret;
}
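/*
 * ethoc_update_rx_stats() returns the number of fatal errors flagged in
 * the descriptor; ethoc_rx() below hands a frame to the stack only when
 * that count is zero.
 */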

static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If packet (interrupt) came in between checking
			 * BD_EMPTY and clearing the interrupt source, then we
			 * risk missing the packet as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * BD_EMPTY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev,
						 "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;

	if (bd->stat & TX_BD_LC) {
		dev_err(&netdev->dev, "TX: late collision\n");
		netdev->stats.tx_window_errors++;
	}

	if (bd->stat & TX_BD_RL) {
		dev_err(&netdev->dev, "TX: retransmit limit\n");
		netdev->stats.tx_aborted_errors++;
	}

	if (bd->stat & TX_BD_UR) {
		dev_err(&netdev->dev, "TX: underrun\n");
		netdev->stats.tx_fifo_errors++;
	}

	if (bd->stat & TX_BD_CS) {
		dev_err(&netdev->dev, "TX: carrier sense lost\n");
		netdev->stats.tx_carrier_errors++;
	}

	if (bd->stat & TX_BD_STATS)
		netdev->stats.tx_errors++;

	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
	netdev->stats.tx_bytes += bd->stat >> 16;
	netdev->stats.tx_packets++;
}

static int ethoc_tx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;
	struct ethoc_bd bd;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;

		entry = priv->dty_tx & (priv->num_tx - 1);

		ethoc_read_bd(priv, entry, &bd);

		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
			ethoc_ack_irq(priv, INT_MASK_TX);
			/* If interrupt came in between reading in the BD
			 * and clearing the interrupt source, then we risk
			 * missing the event as the TX interrupt won't trigger
			 * right away when we reenable it; hence, check
			 * TX_BD_READY here again to make sure there isn't
			 * such an event pending...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & TX_BD_READY ||
			    (priv->dty_tx == priv->cur_tx))
				break;
		}

		ethoc_update_tx_stats(priv, &bd);
		priv->dty_tx++;
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	return count;
}
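/*
 * Note on the TX bookkeeping above: cur_tx and dty_tx are free-running
 * counters, so cur_tx - dty_tx is the number of descriptors in flight,
 * and entry = dty_tx & (num_tx - 1) relies on num_tx being a power of
 * two (enforced in ethoc_probe()). The queue is woken once no more than
 * half of the ring is still in use.
 */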

static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;
	u32 mask;

	/* Figure out what triggered the interrupt...
	 * The tricky bit here is that the interrupt source bits get
	 * set in INT_SOURCE for an event regardless of whether that
	 * event is masked or not. Thus, in order to figure out what
	 * triggered the interrupt, we need to remove the sources
	 * for all events that are currently masked. This behaviour
	 * is not particularly well documented but reasonable...
	 */
	mask = ethoc_read(priv, INT_MASK);
	pending = ethoc_read(priv, INT_SOURCE);
	pending &= mask;

	if (unlikely(pending == 0))
		return IRQ_NONE;

	ethoc_ack_irq(priv, pending);

	/* We always handle the dropped packet interrupt */
	if (pending & INT_MASK_BUSY) {
		dev_err(&dev->dev, "packet dropped\n");
		dev->stats.rx_dropped++;
	}

	/* Handle receive/transmit event by switching to polling */
	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
	struct ethoc *priv = netdev_priv(dev);
	u8 *mac = (u8 *)addr;
	u32 reg;

	reg = ethoc_read(priv, MAC_ADDR0);
	mac[2] = (reg >> 24) & 0xff;
	mac[3] = (reg >> 16) & 0xff;
	mac[4] = (reg >> 8) & 0xff;
	mac[5] = (reg >> 0) & 0xff;

	reg = ethoc_read(priv, MAC_ADDR1);
	mac[0] = (reg >> 8) & 0xff;
	mac[1] = (reg >> 0) & 0xff;

	return 0;
}

static int ethoc_poll(struct napi_struct *napi, int budget)
{
	struct ethoc *priv = container_of(napi, struct ethoc, napi);
	int rx_work_done = 0;
	int tx_work_done = 0;

	rx_work_done = ethoc_rx(priv->netdev, budget);
	tx_work_done = ethoc_tx(priv->netdev, budget);

	if (rx_work_done < budget && tx_work_done < budget) {
		napi_complete(napi);
		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
	}

	return rx_work_done;
}

static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	for (i = 0; i < 5; i++) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	for (i = 0; i < 5; i++) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY)) {
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return 0;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static int ethoc_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static void ethoc_mdio_poll(struct net_device *dev)
{
}

static int __devinit ethoc_mdio_probe(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct phy_device *phy;
	int err;

	if (priv->phy_id != -1)
		phy = priv->mdio->phy_map[priv->phy_id];
	else
		phy = phy_find_first(priv->mdio);

	if (!phy) {
		dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
			PHY_INTERFACE_MODE_GMII);
	if (err) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return err;
	}

	priv->phy = phy;
	return 0;
}
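/*
 * The MDIO accessors above poll MIISTATUS up to five times, sleeping
 * 100-200 us between attempts, so a stuck bus makes them give up with
 * -EBUSY after roughly one millisecond.
 */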

static int ethoc_open(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
			dev->name, dev);
	if (ret)
		return ret;

	ethoc_init_ring(priv, dev->mem_start);
	ethoc_reset(priv);

	if (netif_queue_stopped(dev)) {
		dev_dbg(&dev->dev, "resuming queue\n");
		netif_wake_queue(dev);
	} else {
		dev_dbg(&dev->dev, "starting queue\n");
		netif_start_queue(dev);
	}

	phy_start(priv->phy);
	napi_enable(&priv->napi);

	if (netif_msg_ifup(priv)) {
		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
				dev->base_addr, dev->mem_start, dev->mem_end);
	}

	return 0;
}

static int ethoc_stop(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	if (priv->phy)
		phy_stop(priv->phy);

	ethoc_disable_rx_and_tx(priv);
	free_irq(dev->irq, dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	return 0;
}

static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ethoc *priv = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);
	struct phy_device *phy = NULL;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd != SIOCGMIIPHY) {
		if (mdio->phy_id >= PHY_MAX_ADDR)
			return -ERANGE;

		phy = priv->mdio->phy_map[mdio->phy_id];
		if (!phy)
			return -ENODEV;
	} else {
		phy = priv->phy;
	}

	return phy_mii_ioctl(phy, ifr, cmd);
}

static int ethoc_config(struct net_device *dev, struct ifmap *map)
{
	return -ENOSYS;
}

static void ethoc_do_set_mac_address(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
				     (mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
}

static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	ethoc_do_set_mac_address(dev);
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;

	return 0;
}

static void ethoc_set_multicast_list(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 mode = ethoc_read(priv, MODER);
	struct netdev_hw_addr *ha;
	u32 hash[2] = { 0, 0 };

	/* set loopback mode if requested */
	if (dev->flags & IFF_LOOPBACK)
		mode |= MODER_LOOP;
	else
		mode &= ~MODER_LOOP;

	/* receive broadcast frames if requested */
	if (dev->flags & IFF_BROADCAST)
		mode &= ~MODER_BRO;
	else
		mode |= MODER_BRO;

	/* enable promiscuous mode if requested */
	if (dev->flags & IFF_PROMISC)
		mode |= MODER_PRO;
	else
		mode &= ~MODER_PRO;

	ethoc_write(priv, MODER, mode);

	/* receive multicast frames */
	if (dev->flags & IFF_ALLMULTI) {
		hash[0] = 0xffffffff;
		hash[1] = 0xffffffff;
	} else {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc(ETH_ALEN, ha->addr);
			int bit = (crc >> 26) & 0x3f;
			hash[bit >> 5] |= 1 << (bit & 0x1f);
		}
	}

	ethoc_write(priv, ETH_HASH0, hash[0]);
	ethoc_write(priv, ETH_HASH1, hash[1]);
}
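/*
 * The multicast filter above hashes each address with CRC-32 and uses
 * the top six bits of the result to select one of the 64 filter bits
 * spread across ETH_HASH0 and ETH_HASH1; IFF_ALLMULTI simply sets all
 * 64 bits.
 */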

static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
	return -ENOSYS;
}

static void ethoc_tx_timeout(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 pending = ethoc_read(priv, INT_SOURCE);

	if (likely(pending))
		ethoc_interrupt(dev->irq, dev);
}

static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct ethoc_bd bd;
	unsigned int entry;
	void *dest;

	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
		dev->stats.tx_errors++;
		goto out;
	}

	entry = priv->cur_tx % priv->num_tx;
	spin_lock_irq(&priv->lock);
	priv->cur_tx++;

	ethoc_read_bd(priv, entry, &bd);
	if (unlikely(skb->len < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	dest = priv->vma[entry];
	memcpy_toio(dest, skb->data, skb->len);

	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(skb->len);
	ethoc_write_bd(priv, entry, &bd);

	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
		dev_dbg(&dev->dev, "stopping queue\n");
		netif_stop_queue(dev);
	}

	spin_unlock_irq(&priv->lock);
	skb_tx_timestamp(skb);
out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops ethoc_netdev_ops = {
	.ndo_open = ethoc_open,
	.ndo_stop = ethoc_stop,
	.ndo_do_ioctl = ethoc_ioctl,
	.ndo_set_config = ethoc_config,
	.ndo_set_mac_address = ethoc_set_mac_address,
	.ndo_set_rx_mode = ethoc_set_multicast_list,
	.ndo_change_mtu = ethoc_change_mtu,
	.ndo_tx_timeout = ethoc_tx_timeout,
	.ndo_start_xmit = ethoc_start_xmit,
};
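/*
 * Note that ethoc_start_xmit() above commits a descriptor in two steps:
 * the length and status bits are written first, and TX_BD_READY is set
 * with a second write, presumably so that the MAC can never pick up a
 * half-updated descriptor.
 */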

/**
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev: platform device
 */
static int __devinit ethoc_probe(struct platform_device *pdev)
{
	struct net_device *netdev = NULL;
	struct resource *res = NULL;
	struct resource *mmio = NULL;
	struct resource *mem = NULL;
	struct ethoc *priv = NULL;
	unsigned int phy;
	int num_bd;
	int ret = 0;
	bool random_mac = false;

	/* allocate networking device */
	netdev = alloc_etherdev(sizeof(struct ethoc));
	if (!netdev) {
		ret = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	platform_set_drvdata(pdev, netdev);

	/* obtain I/O memory space */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
		ret = -ENXIO;
		goto free;
	}

	mmio = devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), res->name);
	if (!mmio) {
		dev_err(&pdev->dev, "cannot request I/O memory space\n");
		ret = -ENXIO;
		goto free;
	}

	netdev->base_addr = mmio->start;

	/* obtain buffer memory space */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		mem = devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), res->name);
		if (!mem) {
			dev_err(&pdev->dev, "cannot request memory space\n");
			ret = -ENXIO;
			goto free;
		}

		netdev->mem_start = mem->start;
		netdev->mem_end = mem->end;
	}

	/* obtain device IRQ number */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain IRQ\n");
		ret = -ENXIO;
		goto free;
	}

	netdev->irq = res->start;

	/* setup driver-private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dma_alloc = 0;
	priv->io_region_size = resource_size(mmio);

	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
			resource_size(mmio));
	if (!priv->iobase) {
		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
		ret = -ENXIO;
		goto free;
	}

	if (netdev->mem_end) {
		priv->membase = devm_ioremap_nocache(&pdev->dev,
			netdev->mem_start, resource_size(mem));
		if (!priv->membase) {
			dev_err(&pdev->dev, "cannot remap memory space\n");
			ret = -ENXIO;
			goto free;
		}
	} else {
		/* Allocate buffer memory */
		priv->membase = dmam_alloc_coherent(&pdev->dev,
			buffer_size, (void *)&netdev->mem_start,
			GFP_KERNEL);
		if (!priv->membase) {
			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
				buffer_size);
			ret = -ENOMEM;
			goto free;
		}
		netdev->mem_end = netdev->mem_start + buffer_size;
		priv->dma_alloc = buffer_size;
	}

	/* calculate the number of TX/RX buffers, maximum 128 supported */
	num_bd = min_t(unsigned int,
		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
	if (num_bd < 4) {
		ret = -ENODEV;
		goto free;
	}
	/* num_tx must be a power of two */
	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
	priv->num_rx = num_bd - priv->num_tx;

	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
		priv->num_tx, priv->num_rx);
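	/*
	 * Example: with the default 32 KiB DMA buffer this comes to
	 * num_bd = 21 (32 KiB / 1536), split into num_tx =
	 * rounddown_pow_of_two(21 / 2) = 8 and num_rx = 13.
	 */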

	priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *),
			GFP_KERNEL);
	if (!priv->vma) {
		ret = -ENOMEM;
		goto free;
	}

	/* Allow the platform setup code to pass in a MAC address. */
	if (pdev->dev.platform_data) {
		struct ethoc_platform_data *pdata = pdev->dev.platform_data;
		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
		priv->phy_id = pdata->phy_id;
	} else {
		priv->phy_id = -1;

#ifdef CONFIG_OF
		{
			const uint8_t *mac;

			mac = of_get_property(pdev->dev.of_node,
					      "local-mac-address",
					      NULL);
			if (mac)
				memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
		}
#endif
	}

	/* Check that the given MAC address is valid. If it isn't, read the
	 * current MAC from the controller.
	 */
	if (!is_valid_ether_addr(netdev->dev_addr))
		ethoc_get_mac_address(netdev, netdev->dev_addr);

	/* Check the MAC again for validity, if it still isn't valid choose
	 * and program a random one.
	 */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_random_addr(netdev->dev_addr);
		random_mac = true;
	}

	ethoc_do_set_mac_address(netdev);

	if (random_mac)
		netdev->addr_assign_type |= NET_ADDR_RANDOM;

	/* register MII bus */
	priv->mdio = mdiobus_alloc();
	if (!priv->mdio) {
		ret = -ENOMEM;
		goto free;
	}

	priv->mdio->name = "ethoc-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
			priv->mdio->name, pdev->id);
	priv->mdio->read = ethoc_mdio_read;
	priv->mdio->write = ethoc_mdio_write;
	priv->mdio->reset = ethoc_mdio_reset;
	priv->mdio->priv = priv;

	priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mdio->irq) {
		ret = -ENOMEM;
		goto free_mdio;
	}

	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
		priv->mdio->irq[phy] = PHY_POLL;

	ret = mdiobus_register(priv->mdio);
	if (ret) {
		dev_err(&netdev->dev, "failed to register MDIO bus\n");
		goto free_mdio;
	}

	ret = ethoc_mdio_probe(netdev);
	if (ret) {
		dev_err(&netdev->dev, "failed to probe MDIO bus\n");
		goto error;
	}

	ether_setup(netdev);

	/* setup the net_device structure */
	netdev->netdev_ops = &ethoc_netdev_ops;
	netdev->watchdog_timeo = ETHOC_TIMEOUT;

	/* setup NAPI */
	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);

	spin_lock_init(&priv->lock);

	ret = register_netdev(netdev);
	if (ret < 0) {
		dev_err(&netdev->dev, "failed to register interface\n");
		goto error2;
	}

	goto out;

error2:
	netif_napi_del(&priv->napi);
error:
	mdiobus_unregister(priv->mdio);
free_mdio:
	kfree(priv->mdio->irq);
	mdiobus_free(priv->mdio);
free:
	free_netdev(netdev);
out:
	return ret;
}

/**
 * ethoc_remove - shutdown OpenCores ethernet MAC
 * @pdev: platform device
 */
static int __devexit ethoc_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ethoc *priv = netdev_priv(netdev);

	platform_set_drvdata(pdev, NULL);

	if (netdev) {
		netif_napi_del(&priv->napi);
		phy_disconnect(priv->phy);
		priv->phy = NULL;

		if (priv->mdio) {
			mdiobus_unregister(priv->mdio);
			kfree(priv->mdio->irq);
			mdiobus_free(priv->mdio);
		}
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif

static struct of_device_id ethoc_match[] = {
	{ .compatible = "opencores,ethoc", },
	{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);

static struct platform_driver ethoc_driver = {
	.probe   = ethoc_probe,
	.remove  = __devexit_p(ethoc_remove),
	.suspend = ethoc_suspend,
	.resume  = ethoc_resume,
	.driver  = {
		.name = "ethoc",
		.owner = THIS_MODULE,
		.of_match_table = ethoc_match,
	},
};
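/*
 * module_platform_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters ethoc_driver on module
 * load and unload.
 */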
module_platform_driver(ethoc_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");