// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * EP93xx ethernet network device driver
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Dedicated to Marija Kulikova.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>

#define DRV_MODULE_NAME		"ep93xx-eth"

#define RX_QUEUE_ENTRIES	64
#define TX_QUEUE_ENTRIES	8

#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

#define REG_RXCTL		0x0000
#define REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define REG_TXCTL_ENABLE	0x00000001
#define REG_MIICMD		0x0010
#define REG_MIICMD_READ		0x00008000
#define REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define REG_MIISTS_BUSY		0x00000001
#define REG_SELFCTL		0x0020
#define REG_SELFCTL_RESET	0x00000001
#define REG_INTEN		0x0024
#define REG_INTEN_TX		0x00000008
#define REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define REG_INTSTS_TX		0x00000008
#define REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
#define REG_AFP			0x004c
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
#define REG_GIINTMSK		0x0064
#define REG_GIINTMSK_ENABLE	0x00008000
#define REG_BMCTL		0x0080
#define REG_BMCTL_ENABLE_TX	0x00000100
#define REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define REG_BMSTS_RX_ACTIVE	0x00000008
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8

struct ep93xx_rdesc
{
	u32	buf_addr;
	u32	rdesc1;
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

struct ep93xx_rstat
{
	u32	rstat0;
	u32	rstat1;
};

#define RSTAT0_RFP		0x80000000
#define RSTAT0_RWE		0x40000000
#define RSTAT0_EOF		0x20000000
#define RSTAT0_EOB		0x10000000
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000
#define RSTAT0_FE		0x00080000
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000
#define RSTAT0_CRCI		0x00008000
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

struct ep93xx_tdesc
{
	u32	buf_addr;
	u32	tdesc1;
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff
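/*
 * TX status queue entries are a single status word.  ep93xx_tx_complete()
 * below treats an entry as valid once TSTAT0_TXFP is set, and the low
 * bits (TSTAT0_BUFFER_INDEX) echo the descriptor index so completions
 * can be cross-checked against tx_clean_pointer.
 */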
struct ep93xx_tstat
{
	u32	tstat0;
};

#define TSTAT0_TXFP		0x80000000
#define TSTAT0_TXWE		0x40000000
#define TSTAT0_FA		0x20000000
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000
#define TSTAT0_TXU		0x02000000
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000
#define TSTAT0_BUFFER_INDEX	0x00007fff

struct ep93xx_descs
{
	struct ep93xx_rdesc	rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc	tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat	rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat	tstat[TX_QUEUE_ENTRIES];
};

struct ep93xx_priv
{
	struct resource		*res;
	void __iomem		*base_addr;
	int			irq;

	struct ep93xx_descs	*descs;
	dma_addr_t		descs_dma_addr;

	void			*rx_buf[RX_QUEUE_ENTRIES];
	void			*tx_buf[TX_QUEUE_ENTRIES];

	spinlock_t		rx_lock;
	unsigned int		rx_pointer;
	unsigned int		tx_clean_pointer;
	unsigned int		tx_pointer;
	spinlock_t		tx_pending_lock;
	unsigned int		tx_pending;

	struct net_device	*dev;
	struct napi_struct	napi;

	struct mii_if_info	mii;
	u8			mdc_divisor;
};

#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))

static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int data;
	int i;

	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_info("mdio read timed out\n");
		data = 0xffff;
	} else {
		data = rdl(ep, REG_MIIDATA);
	}

	return data;
}

static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_MIIDATA, data);
	wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_info("mdio write timed out\n");
}
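/*
 * RX path: the MAC posts one ep93xx_rstat entry per received frame.
 * An entry is only consumed once the RFP bit is set in *both* status
 * words, presumably because the hardware may write the two words at
 * different times; checking both avoids trusting a half-written entry.
 */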
static int ep93xx_rx(struct net_device *dev, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int processed = 0;

	while (processed < budget) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
			break;

		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		if (!(rstat0 & RSTAT0_EOF))
			pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);

		if (!(rstat0 & RSTAT0_RWE)) {
			dev->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				dev->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				dev->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				dev->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				dev->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS. */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		skb = netdev_alloc_skb(dev, length + 2);
		if (likely(skb != NULL)) {
			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
			skb_reserve(skb, 2);
			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			dma_sync_single_for_device(dev->dev.parent,
						   rxd->buf_addr, length,
						   DMA_FROM_DEVICE);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			napi_gro_receive(&ep->napi, skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
		} else {
			dev->stats.rx_dropped++;
		}

err:
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}

static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		spin_lock_irq(&ep->rx_lock);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		spin_unlock_irq(&ep->rx_lock);
	}

	if (rx) {
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}

static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];

	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
				DMA_TO_DEVICE);
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
				   DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
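/*
 * TX completion runs from the interrupt handler.  tx_pending is
 * protected by tx_pending_lock; the queue is woken only if it was
 * stopped because the ring was completely full when completion
 * started draining it (tx_pending == TX_QUEUE_ENTRIES).
 */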
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			pr_crit("frame aborted %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			pr_crit("entry mismatch %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += length;
		} else {
			dev->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			dev->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			dev->stats.tx_fifo_errors++;
		dev->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}

static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}

static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	if (!ep->descs)
		return;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		dma_addr_t d;

		d = ep->descs->rdesc[i].buf_addr;
		if (d)
			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);

		kfree(ep->rx_buf[i]);
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		dma_addr_t d;

		d = ep->descs->tdesc[i].buf_addr;
		if (d)
			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);

		kfree(ep->tx_buf[i]);
	}

	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
			  ep->descs_dma_addr);
	ep->descs = NULL;
}

static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
				       &ep->descs_dma_addr, GFP_KERNEL);
	if (ep->descs == NULL)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->rx_buf[i] = buf;
		ep->descs->rdesc[i].buf_addr = d;
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->tx_buf[i] = buf;
		ep->descs->tdesc[i].buf_addr = d;
	}

	return 0;

err:
	ep93xx_free_buffers(ep);
	return 1;
}
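/*
 * Hardware bring-up: reset the MAC, program the MDC divisor, point the
 * four queue base/current registers at the coherent descriptor block,
 * then enable the bus master.  The BMSTS poll confirms the RX DMA
 * engine actually started before the full RX ring is pre-enqueued.
 */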
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to reset\n");
		return 1;
	}

	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress? */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to start\n");
		return 1;
	}

	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}

static void ep93xx_stop_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_crit("hw failed to reset\n");
}

static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}

static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	napi_disable(&ep->napi);
	netif_stop_queue(dev);

	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
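/*
 * PHY management goes through the generic mii library: the mdio_read/
 * mdio_write hooks registered in probe back both the SIOCGMIIREG-style
 * ioctls handled by generic_mii_ioctl() below and the ethtool link
 * ksettings operations.
 */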
static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}

static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}

static int ep93xx_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&ep->mii, cmd);

	return 0;
}

static int ep93xx_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
}

static int ep93xx_nway_reset(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_nway_restart(&ep->mii);
}

static u32 ep93xx_get_link(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_link_ok(&ep->mii);
}

static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
	.get_link_ksettings	= ep93xx_get_link_ksettings,
	.set_link_ksettings	= ep93xx_set_link_ksettings,
};

static const struct net_device_ops ep93xx_netdev_ops = {
	.ndo_open		= ep93xx_open,
	.ndo_stop		= ep93xx_close,
	.ndo_start_xmit		= ep93xx_xmit,
	.ndo_eth_ioctl		= ep93xx_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
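/*
 * Remove doubles as the error-unwind path for probe (probe's err_out
 * label calls it): every resource is released conditionally, so it is
 * safe to call on a partially initialised device.
 */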
static void ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;
	struct resource *mem;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return;

	ep = netdev_priv(dev);

	/* @@@ Force down. */
	unregister_netdev(dev);
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		release_mem_region(mem->start, resource_size(mem));
	}

	free_netdev(dev);
}

static int ep93xx_eth_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;
	struct resource *mem;
	void __iomem *base_addr;
	struct device_node *np;
	u8 addr[ETH_ALEN];
	u32 phy_id;
	int irq;
	int err;

	if (pdev == NULL)
		return -ENODEV;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq < 0)
		return -ENXIO;

	base_addr = ioremap(mem->start, resource_size(mem));
	if (!base_addr)
		return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");

	np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!np)
		return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");

	err = of_property_read_u32(np, "reg", &phy_id);
	of_node_put(np);
	if (err)
		return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");

	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
	if (dev == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
	eth_hw_addr_set(dev, addr);
	dev->ethtool_ops = &ep93xx_ethtool_ops;
	dev->netdev_ops = &ep93xx_netdev_ops;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;

	ep = netdev_priv(dev);
	ep->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	netif_napi_add(dev, &ep->napi, ep93xx_poll);

	platform_set_drvdata(pdev, dev);

	ep->res = request_mem_region(mem->start, resource_size(mem),
				     dev_name(&pdev->dev));
	if (ep->res == NULL) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_out;
	}

	ep->base_addr = base_addr;
	ep->irq = irq;

	ep->mii.phy_id = phy_id;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;
	ep->mii.dev = dev;
	ep->mii.mdio_read = ep93xx_mdio_read;
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40;	/* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_out;
	}

	printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
	       dev->name, ep->irq, dev->dev_addr);

	return 0;

err_out:
	ep93xx_eth_remove(pdev);
	return err;
}

static const struct of_device_id ep93xx_eth_of_ids[] = {
	{ .compatible = "cirrus,ep9301-eth" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);

static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name		= "ep93xx-eth",
		.of_match_table	= ep93xx_eth_of_ids,
	},
};

module_platform_driver(ep93xx_eth_driver);

MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
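/*
 * Illustrative device tree node (a sketch only; the register address,
 * interrupt specifier, and phy label below are board-specific
 * placeholders, not values taken from this driver):
 *
 *	ethernet@80010000 {
 *		compatible = "cirrus,ep9301-eth";
 *		reg = <0x80010000 0x10000>;
 *		interrupts = <7>;
 *		phy-handle = <&phy0>;
 *	};
 *
 * Probe resolves "phy-handle" and reads the referenced PHY node's
 * "reg" property to obtain the MII address used by ep93xx_mdio_read()
 * and ep93xx_mdio_write().
 */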