/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");

/* register offsets */
#define	MODER		0x00
#define	INT_SOURCE	0x04
#define	INT_MASK	0x08
#define	IPGT		0x0c
#define	IPGR1		0x10
#define	IPGR2		0x14
#define	PACKETLEN	0x18
#define	COLLCONF	0x1c
#define	TX_BD_NUM	0x20
#define	CTRLMODER	0x24
#define	MIIMODER	0x28
#define	MIICOMMAND	0x2c
#define	MIIADDRESS	0x30
#define	MIITX_DATA	0x34
#define	MIIRX_DATA	0x38
#define	MIISTATUS	0x3c
#define	MAC_ADDR0	0x40
#define	MAC_ADDR1	0x44
#define	ETH_HASH0	0x48
#define	ETH_HASH1	0x4c
#define	ETH_TXCTRL	0x50

/* mode register */
#define	MODER_RXEN	(1 <<  0) /* receive enable */
#define	MODER_TXEN	(1 <<  1) /* transmit enable */
#define	MODER_NOPRE	(1 <<  2) /* no preamble */
#define	MODER_BRO	(1 <<  3) /* broadcast address */
#define	MODER_IAM	(1 <<  4) /* individual address mode */
#define	MODER_PRO	(1 <<  5) /* promiscuous mode */
#define	MODER_IFG	(1 <<  6) /* interframe gap for incoming frames */
#define	MODER_LOOP	(1 <<  7) /* loopback */
#define	MODER_NBO	(1 <<  8) /* no back-off */
#define	MODER_EDE	(1 <<  9) /* excess defer enable */
#define	MODER_FULLD	(1 << 10) /* full duplex */
#define	MODER_RESET	(1 << 11) /* FIXME: reset (undocumented) */
#define	MODER_DCRC	(1 << 12) /* delayed CRC enable */
#define	MODER_CRC	(1 << 13) /* CRC enable */
#define	MODER_HUGE	(1 << 14) /* huge packets enable */
#define	MODER_PAD	(1 << 15) /* padding enabled */
#define	MODER_RSM	(1 << 16) /* receive small packets */

/* interrupt source and mask registers */
#define	INT_MASK_TXF	(1 << 0) /* transmit frame */
#define	INT_MASK_TXE	(1 << 1) /* transmit error */
#define	INT_MASK_RXF	(1 << 2) /* receive frame */
#define	INT_MASK_RXE	(1 << 3) /* receive error */
#define	INT_MASK_BUSY	(1 << 4)
#define	INT_MASK_TXC	(1 << 5) /* transmit control frame */
#define	INT_MASK_RXC	(1 << 6) /* receive control frame */

#define	INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define	INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define	INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define	PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define	PACKETLEN_MAX(max)		(((max) & 0xffff) << 0)
#define	PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))

/* transmit buffer number register */
#define	TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)
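
/* The core provides 128 buffer descriptors in total, shared between the
 * TX and RX rings, which is why the TX descriptor count is clamped to
 * 0x80 above (see also the num_bd calculation in ethoc_probe()).
 */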

/* control module mode register */
#define	CTRLMODER_PASSALL	(1 << 0) /* pass all receive frames */
#define	CTRLMODER_RXFLOW	(1 << 1) /* receive control flow */
#define	CTRLMODER_TXFLOW	(1 << 2) /* transmit control flow */

/* MII mode register */
#define	MIIMODER_CLKDIV(x)	((x) & 0xfe) /* needs to be an even number */
#define	MIIMODER_NOPRE		(1 << 8) /* no preamble */

/* MII command register */
#define	MIICOMMAND_SCAN		(1 << 0) /* scan status */
#define	MIICOMMAND_READ		(1 << 1) /* read status */
#define	MIICOMMAND_WRITE	(1 << 2) /* write control data */

/* MII address register */
#define	MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
#define	MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
#define	MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define	MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define	MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define	MIISTATUS_LINKFAIL	(1 << 0)
#define	MIISTATUS_BUSY		(1 << 1)
#define	MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define	TX_BD_CS		(1 <<  0) /* carrier sense lost */
#define	TX_BD_DF		(1 <<  1) /* defer indication */
#define	TX_BD_LC		(1 <<  2) /* late collision */
#define	TX_BD_RL		(1 <<  3) /* retransmission limit */
#define	TX_BD_RETRY_MASK	(0x00f0)
#define	TX_BD_RETRY(x)		(((x) & 0x00f0) >> 4)
#define	TX_BD_UR		(1 <<  8) /* transmitter underrun */
#define	TX_BD_CRC		(1 << 11) /* TX CRC enable */
#define	TX_BD_PAD		(1 << 12) /* pad enable for short packets */
#define	TX_BD_WRAP		(1 << 13) /* wrap back to the first descriptor */
#define	TX_BD_IRQ		(1 << 14) /* interrupt request enable */
#define	TX_BD_READY		(1 << 15) /* TX buffer ready */
#define	TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define	TX_BD_LEN_MASK		(0xffff << 16)

#define	TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define	RX_BD_LC	(1 <<  0) /* late collision */
#define	RX_BD_CRC	(1 <<  1) /* RX CRC error */
#define	RX_BD_SF	(1 <<  2) /* short frame */
#define	RX_BD_TL	(1 <<  3) /* too long */
#define	RX_BD_DN	(1 <<  4) /* dribble nibble */
#define	RX_BD_IS	(1 <<  5) /* invalid symbol */
#define	RX_BD_OR	(1 <<  6) /* receiver overrun */
#define	RX_BD_MISS	(1 <<  7) /* missed frame */
#define	RX_BD_CF	(1 <<  8) /* control frame */
#define	RX_BD_WRAP	(1 << 13) /* wrap back to the first descriptor */
#define	RX_BD_IRQ	(1 << 14) /* interrupt request enable */
#define	RX_BD_EMPTY	(1 << 15) /* descriptor is empty (owned by the MAC) */
#define	RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define	RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define	ETHOC_BUFSIZ		1536
#define	ETHOC_ZLEN		64
#define	ETHOC_BD_BASE		0x400
#define	ETHOC_TIMEOUT		(HZ / 2)
#define	ETHOC_MII_TIMEOUT	(1 + (HZ / 5))

/**
 * struct ethoc - driver-private device structure
 * @iobase: pointer to I/O memory region
 * @membase: pointer to buffer memory region
 * @dma_alloc: dma allocated buffer size
 * @io_region_size: I/O memory region size
 * @num_tx: number of send buffers
 * @cur_tx: last send buffer written
 * @dty_tx: last buffer actually sent
 * @num_rx: number of receive buffers
 * @cur_rx: current receive buffer
 * @vma: pointer to array of virtual memory addresses for buffers
 * @netdev: pointer to network device structure
 * @napi: NAPI structure
 * @msg_enable: device state flags
 * @lock: device lock
 * @phy: attached PHY
 * @mdio: MDIO bus for PHY access
 * @phy_id: address of attached PHY
 */
struct ethoc {
	void __iomem *iobase;
	void __iomem *membase;
	int dma_alloc;
	resource_size_t io_region_size;

	unsigned int num_tx;
	unsigned int cur_tx;
	unsigned int dty_tx;

	unsigned int num_rx;
	unsigned int cur_rx;

	void **vma;

	struct net_device *netdev;
	struct napi_struct napi;
	u32 msg_enable;

	spinlock_t lock;

	struct phy_device *phy;
	struct mii_bus *mdio;
	s8 phy_id;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat: buffer statistics
 * @addr: physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
	return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	iowrite32(data, dev->iobase + offset);
}

static inline void ethoc_read_bd(struct ethoc *dev, int index,
		struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(dev, offset + 0);
	bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
		const struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(dev, offset + 0, bd->stat);
	ethoc_write(dev, offset + 4, bd->addr);
}

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask |= mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask &= ~mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(dev, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void *vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}
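
/* Bring the MAC into a known state: RX and TX are disabled while the mode
 * register is updated, CRC generation and automatic padding are switched
 * on, the core is forced to full duplex, and all interrupt sources are
 * acknowledged before being unmasked again.
 */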
static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);
	ethoc_write(dev, IPGT, 0x15);

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
		struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		netdev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		netdev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		netdev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		netdev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		netdev->stats.collisions++;
		ret++;
	}

	return ret;
}
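
/* NAPI receive path: RX descriptors follow the TX descriptors in the BD
 * table, so the hardware index of the current RX buffer is num_tx + cur_rx.
 * Received frames are copied out of buffer memory with memcpy_fromio() and
 * the descriptor is handed back to the MAC by setting RX_BD_EMPTY again.
 */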
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If packet (interrupt) came in between checking
			 * RX_BD_EMPTY and clearing the interrupt source, then
			 * we risk missing the packet as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * RX_BD_EMPTY here again to make sure there isn't such
			 * a packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev,
					    "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;

	if (bd->stat & TX_BD_LC) {
		dev_err(&netdev->dev, "TX: late collision\n");
		netdev->stats.tx_window_errors++;
	}

	if (bd->stat & TX_BD_RL) {
		dev_err(&netdev->dev, "TX: retransmit limit\n");
		netdev->stats.tx_aborted_errors++;
	}

	if (bd->stat & TX_BD_UR) {
		dev_err(&netdev->dev, "TX: underrun\n");
		netdev->stats.tx_fifo_errors++;
	}

	if (bd->stat & TX_BD_CS) {
		dev_err(&netdev->dev, "TX: carrier sense lost\n");
		netdev->stats.tx_carrier_errors++;
	}

	if (bd->stat & TX_BD_STATS)
		netdev->stats.tx_errors++;

	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
	netdev->stats.tx_bytes += bd->stat >> 16;
	netdev->stats.tx_packets++;
}

static int ethoc_tx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;
	struct ethoc_bd bd;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;

		entry = priv->dty_tx & (priv->num_tx - 1);

		ethoc_read_bd(priv, entry, &bd);

		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
			ethoc_ack_irq(priv, INT_MASK_TX);
			/* If interrupt came in between reading in the BD
			 * and clearing the interrupt source, then we risk
			 * missing the event as the TX interrupt won't trigger
			 * right away when we reenable it; hence, check
			 * TX_BD_READY here again to make sure there isn't
			 * such an event pending...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & TX_BD_READY ||
			    (priv->dty_tx == priv->cur_tx))
				break;
		}

		ethoc_update_tx_stats(priv, &bd);
		priv->dty_tx++;
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	return count;
}
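
/* Interrupt handling follows the usual NAPI pattern: RX and TX interrupts
 * are masked here and re-enabled from ethoc_poll() once the rings have been
 * processed below budget; only the BUSY (dropped packet) event is handled
 * directly in the handler.
 */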
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;
	u32 mask;

	/* Figure out what triggered the interrupt...
	 * The tricky bit here is that the interrupt source bits get
	 * set in INT_SOURCE for an event regardless of whether that
	 * event is masked or not. Thus, in order to figure out what
	 * triggered the interrupt, we need to remove the sources
	 * for all events that are currently masked. This behaviour
	 * is not particularly well documented but reasonable...
	 */
	mask = ethoc_read(priv, INT_MASK);
	pending = ethoc_read(priv, INT_SOURCE);
	pending &= mask;

	if (unlikely(pending == 0))
		return IRQ_NONE;

	ethoc_ack_irq(priv, pending);

	/* We always handle the dropped packet interrupt */
	if (pending & INT_MASK_BUSY) {
		dev_err(&dev->dev, "packet dropped\n");
		dev->stats.rx_dropped++;
	}

	/* Handle receive/transmit event by switching to polling */
	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
	struct ethoc *priv = netdev_priv(dev);
	u8 *mac = (u8 *)addr;
	u32 reg;

	reg = ethoc_read(priv, MAC_ADDR0);
	mac[2] = (reg >> 24) & 0xff;
	mac[3] = (reg >> 16) & 0xff;
	mac[4] = (reg >> 8) & 0xff;
	mac[5] = (reg >> 0) & 0xff;

	reg = ethoc_read(priv, MAC_ADDR1);
	mac[0] = (reg >> 8) & 0xff;
	mac[1] = (reg >> 0) & 0xff;

	return 0;
}

static int ethoc_poll(struct napi_struct *napi, int budget)
{
	struct ethoc *priv = container_of(napi, struct ethoc, napi);
	int rx_work_done = 0;
	int tx_work_done = 0;

	rx_work_done = ethoc_rx(priv->netdev, budget);
	tx_work_done = ethoc_tx(priv->netdev, budget);

	if (rx_work_done < budget && tx_work_done < budget) {
		napi_complete(napi);
		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
	}

	return rx_work_done;
}

static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	for (i = 0; i < 5; i++) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	for (i = 0; i < 5; i++) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY)) {
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return 0;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static int ethoc_mdio_reset(struct mii_bus *bus)
{
	/* nothing to do: the bus needs no reset */
	return 0;
}

static void ethoc_mdio_poll(struct net_device *dev)
{
	/* link-change callback for phy_connect_direct(); the MAC is set up
	 * for full duplex in ethoc_reset(), so there is nothing to adjust
	 * here.
	 */
}

static int ethoc_mdio_probe(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct phy_device *phy;
	int err;

	if (priv->phy_id != -1)
		phy = priv->mdio->phy_map[priv->phy_id];
	else
		phy = phy_find_first(priv->mdio);

	if (!phy) {
		dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
			PHY_INTERFACE_MODE_GMII);
	if (err) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return err;
	}

	priv->phy = phy;
	return 0;
}
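
/* ndo_open: claim the device IRQ, set up the descriptor rings and reset the
 * MAC before (re)starting the transmit queue, the PHY and NAPI polling.
 */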
static int ethoc_open(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
			dev->name, dev);
	if (ret)
		return ret;

	ethoc_init_ring(priv, dev->mem_start);
	ethoc_reset(priv);

	if (netif_queue_stopped(dev)) {
		dev_dbg(&dev->dev, "resuming queue\n");
		netif_wake_queue(dev);
	} else {
		dev_dbg(&dev->dev, "starting queue\n");
		netif_start_queue(dev);
	}

	phy_start(priv->phy);
	napi_enable(&priv->napi);

	if (netif_msg_ifup(priv)) {
		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
				dev->base_addr, dev->mem_start, dev->mem_end);
	}

	return 0;
}

static int ethoc_stop(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	if (priv->phy)
		phy_stop(priv->phy);

	ethoc_disable_rx_and_tx(priv);
	free_irq(dev->irq, dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	return 0;
}

static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ethoc *priv = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);
	struct phy_device *phy = NULL;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd != SIOCGMIIPHY) {
		if (mdio->phy_id >= PHY_MAX_ADDR)
			return -ERANGE;

		phy = priv->mdio->phy_map[mdio->phy_id];
		if (!phy)
			return -ENODEV;
	} else {
		phy = priv->phy;
	}

	return phy_mii_ioctl(phy, ifr, cmd);
}

static int ethoc_config(struct net_device *dev, struct ifmap *map)
{
	return -ENOSYS;
}

static void ethoc_do_set_mac_address(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
				     (mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
}

static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	ethoc_do_set_mac_address(dev);
	return 0;
}

static void ethoc_set_multicast_list(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 mode = ethoc_read(priv, MODER);
	struct netdev_hw_addr *ha;
	u32 hash[2] = { 0, 0 };

	/* set loopback mode if requested */
	if (dev->flags & IFF_LOOPBACK)
		mode |= MODER_LOOP;
	else
		mode &= ~MODER_LOOP;

	/* receive broadcast frames if requested */
	if (dev->flags & IFF_BROADCAST)
		mode &= ~MODER_BRO;
	else
		mode |= MODER_BRO;

	/* enable promiscuous mode if requested */
	if (dev->flags & IFF_PROMISC)
		mode |= MODER_PRO;
	else
		mode &= ~MODER_PRO;

	ethoc_write(priv, MODER, mode);

	/* receive multicast frames */
	if (dev->flags & IFF_ALLMULTI) {
		hash[0] = 0xffffffff;
		hash[1] = 0xffffffff;
	} else {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc(ETH_ALEN, ha->addr);
			int bit = (crc >> 26) & 0x3f;
			hash[bit >> 5] |= 1 << (bit & 0x1f);
		}
	}

	ethoc_write(priv, ETH_HASH0, hash[0]);
	ethoc_write(priv, ETH_HASH1, hash[1]);
}

static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
	return -ENOSYS;
}
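
/* The netdev watchdog fires once the TX queue has been stopped for longer
 * than ETHOC_TIMEOUT; if an interrupt is still pending at that point, run
 * the interrupt handler by hand to recover the lost event.
 */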
static void ethoc_tx_timeout(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 pending = ethoc_read(priv, INT_SOURCE);
	if (likely(pending))
		ethoc_interrupt(dev->irq, dev);
}

static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct ethoc_bd bd;
	unsigned int entry;
	void *dest;

	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
		dev->stats.tx_errors++;
		goto out;
	}

	entry = priv->cur_tx % priv->num_tx;
	spin_lock_irq(&priv->lock);
	priv->cur_tx++;

	ethoc_read_bd(priv, entry, &bd);
	if (unlikely(skb->len < ETHOC_ZLEN))
		bd.stat |= TX_BD_PAD;
	else
		bd.stat &= ~TX_BD_PAD;

	dest = priv->vma[entry];
	memcpy_toio(dest, skb->data, skb->len);

	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
	bd.stat |= TX_BD_LEN(skb->len);
	ethoc_write_bd(priv, entry, &bd);

	bd.stat |= TX_BD_READY;
	ethoc_write_bd(priv, entry, &bd);

	if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
		dev_dbg(&dev->dev, "stopping queue\n");
		netif_stop_queue(dev);
	}

	spin_unlock_irq(&priv->lock);
	skb_tx_timestamp(skb);
out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops ethoc_netdev_ops = {
	.ndo_open = ethoc_open,
	.ndo_stop = ethoc_stop,
	.ndo_do_ioctl = ethoc_ioctl,
	.ndo_set_config = ethoc_config,
	.ndo_set_mac_address = ethoc_set_mac_address,
	.ndo_set_rx_mode = ethoc_set_multicast_list,
	.ndo_change_mtu = ethoc_change_mtu,
	.ndo_tx_timeout = ethoc_tx_timeout,
	.ndo_start_xmit = ethoc_start_xmit,
};

/**
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev: platform device
 */
static int ethoc_probe(struct platform_device *pdev)
{
	struct net_device *netdev = NULL;
	struct resource *res = NULL;
	struct resource *mmio = NULL;
	struct resource *mem = NULL;
	struct ethoc *priv = NULL;
	unsigned int phy;
	int num_bd;
	int ret = 0;
	bool random_mac = false;

	/* allocate networking device */
	netdev = alloc_etherdev(sizeof(struct ethoc));
	if (!netdev) {
		ret = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	platform_set_drvdata(pdev, netdev);

	/* obtain I/O memory space */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
		ret = -ENXIO;
		goto free;
	}

	mmio = devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), res->name);
	if (!mmio) {
		dev_err(&pdev->dev, "cannot request I/O memory space\n");
		ret = -ENXIO;
		goto free;
	}

	netdev->base_addr = mmio->start;

	/* obtain buffer memory space */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		mem = devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), res->name);
		if (!mem) {
			dev_err(&pdev->dev, "cannot request memory space\n");
			ret = -ENXIO;
			goto free;
		}

		netdev->mem_start = mem->start;
		netdev->mem_end = mem->end;
	}

	/* obtain device IRQ number */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain IRQ\n");
		ret = -ENXIO;
		goto free;
	}

	netdev->irq = res->start;

	/* setup driver-private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dma_alloc = 0;
	priv->io_region_size = resource_size(mmio);

	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
			resource_size(mmio));
	if (!priv->iobase) {
		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
		ret = -ENXIO;
		goto error;
	}

	if (netdev->mem_end) {
		priv->membase = devm_ioremap_nocache(&pdev->dev,
			netdev->mem_start, resource_size(mem));
		if (!priv->membase) {
			dev_err(&pdev->dev, "cannot remap memory space\n");
			ret = -ENXIO;
			goto error;
		}
	} else {
		/* Allocate buffer memory */
		priv->membase = dmam_alloc_coherent(&pdev->dev,
			buffer_size, (void *)&netdev->mem_start,
			GFP_KERNEL);
		if (!priv->membase) {
			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
				buffer_size);
			ret = -ENOMEM;
			goto error;
		}
		netdev->mem_end = netdev->mem_start + buffer_size;
		priv->dma_alloc = buffer_size;
	}

	/* calculate the number of TX/RX buffers, maximum 128 supported */
	num_bd = min_t(unsigned int,
		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
	if (num_bd < 4) {
		ret = -ENODEV;
		goto error;
	}
	/* num_tx must be a power of two */
	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
	priv->num_rx = num_bd - priv->num_tx;

	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
		priv->num_tx, priv->num_rx);

	priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *),
			GFP_KERNEL);
	if (!priv->vma) {
		ret = -ENOMEM;
		goto error;
	}

	/* Allow the platform setup code to pass in a MAC address. */
	if (dev_get_platdata(&pdev->dev)) {
		struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
		priv->phy_id = pdata->phy_id;
	} else {
		priv->phy_id = -1;

#ifdef CONFIG_OF
		{
			const uint8_t *mac;

			mac = of_get_property(pdev->dev.of_node,
					      "local-mac-address",
					      NULL);
			if (mac)
				memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
		}
#endif
	}

	/* Check that the given MAC address is valid. If it isn't, read the
	 * current MAC from the controller.
	 */
	if (!is_valid_ether_addr(netdev->dev_addr))
		ethoc_get_mac_address(netdev, netdev->dev_addr);

	/* Check the MAC again for validity, if it still isn't choose and
	 * program a random one.
	 */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_random_addr(netdev->dev_addr);
		random_mac = true;
	}

	ethoc_do_set_mac_address(netdev);

	if (random_mac)
		netdev->addr_assign_type = NET_ADDR_RANDOM;

	/* register MII bus */
	priv->mdio = mdiobus_alloc();
	if (!priv->mdio) {
		ret = -ENOMEM;
		goto free;
	}

	priv->mdio->name = "ethoc-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
			priv->mdio->name, pdev->id);
	priv->mdio->read = ethoc_mdio_read;
	priv->mdio->write = ethoc_mdio_write;
	priv->mdio->reset = ethoc_mdio_reset;
	priv->mdio->priv = priv;

	priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mdio->irq) {
		ret = -ENOMEM;
		goto free_mdio;
	}

	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
		priv->mdio->irq[phy] = PHY_POLL;

	ret = mdiobus_register(priv->mdio);
	if (ret) {
		dev_err(&netdev->dev, "failed to register MDIO bus\n");
		goto free_mdio;
	}

	ret = ethoc_mdio_probe(netdev);
	if (ret) {
		dev_err(&netdev->dev, "failed to probe MDIO bus\n");
		goto error;
	}

	ether_setup(netdev);

	/* setup the net_device structure */
	netdev->netdev_ops = &ethoc_netdev_ops;
	netdev->watchdog_timeo = ETHOC_TIMEOUT;
	netdev->features |= 0;

	/* setup NAPI */
	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);

	spin_lock_init(&priv->lock);

	ret = register_netdev(netdev);
	if (ret < 0) {
		dev_err(&netdev->dev, "failed to register interface\n");
		goto error2;
	}

	goto out;

error2:
	netif_napi_del(&priv->napi);
error:
	mdiobus_unregister(priv->mdio);
free_mdio:
	kfree(priv->mdio->irq);
	mdiobus_free(priv->mdio);
free:
	free_netdev(netdev);
out:
	return ret;
}

/**
 * ethoc_remove - shutdown OpenCores ethernet MAC
 * @pdev: platform device
 */
static int ethoc_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ethoc *priv = netdev_priv(netdev);

	if (netdev) {
		netif_napi_del(&priv->napi);
		phy_disconnect(priv->phy);
		priv->phy = NULL;

		if (priv->mdio) {
			mdiobus_unregister(priv->mdio);
			kfree(priv->mdio->irq);
			mdiobus_free(priv->mdio);
		}
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif

static struct of_device_id ethoc_match[] = {
	{ .compatible = "opencores,ethoc", },
	{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);

static struct platform_driver ethoc_driver = {
	.probe   = ethoc_probe,
	.remove  = ethoc_remove,
	.suspend = ethoc_suspend,
	.resume  = ethoc_resume,
	.driver  = {
		.name = "ethoc",
		.owner = THIS_MODULE,
		.of_match_table = ethoc_match,
	},
};

module_platform_driver(ethoc_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");