/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>

#include <asm/checksum.h>

#include <lantiq_soc.h>
#include <xway_dma.h>
#include <lantiq_platform.h>

#define LTQ_ETOP_MDIO		0x11804
#define MDIO_REQUEST		0x80000000
#define MDIO_READ		0x40000000
#define MDIO_ADDR_MASK		0x1f
#define MDIO_ADDR_OFFSET	0x15
#define MDIO_REG_MASK		0x1f
#define MDIO_REG_OFFSET		0x10
#define MDIO_VAL_MASK		0xffff

#define PPE32_CGEN		0x800
#define LQ_PPE32_ENET_MAC_CFG	0x1840

#define LTQ_ETOP_ENETS0		0x11850
#define LTQ_ETOP_MAC_DA0	0x1186C
#define LTQ_ETOP_MAC_DA1	0x11870
#define LTQ_ETOP_CFG		0x16020
#define LTQ_ETOP_IGPLEN		0x16080

#define MAX_DMA_CHAN		0x8
#define MAX_DMA_CRC_LEN		0x4
#define MAX_DMA_DATA_LEN	0x600

#define ETOP_FTCU		BIT(28)
#define ETOP_MII_MASK		0xf
#define ETOP_MII_NORMAL		0xd
#define ETOP_MII_REVERSE	0xe
#define ETOP_PLEN_UNDER		0x40
#define ETOP_CGEN		0x800

/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL	1
#define LTQ_ETOP_RX_CHANNEL	6
#define IS_TX(x)		((x) == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x)		((x) == LTQ_ETOP_RX_CHANNEL)

#define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z)	\
		ltq_w32_mask(x, y, ltq_etop_membase + (z))

#define DRV_VERSION	"1.0"

static void __iomem *ltq_etop_membase;

struct ltq_etop_chan {
	int idx;
	int tx_free;
	struct net_device *netdev;
	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];
};

struct ltq_etop_priv {
	struct net_device *netdev;
	struct ltq_eth_data *pldata;
	struct resource *res;

	struct mii_bus *mii_bus;
	struct phy_device *phydev;

	struct ltq_etop_chan ch[MAX_DMA_CHAN];
	int tx_free[MAX_DMA_CHAN >> 1];

	spinlock_t lock;
};
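
/*
 * Attach a fresh receive buffer to the channel's current descriptor: the
 * skb data area is mapped for DMA, the descriptor is handed back to the
 * engine (LTQ_DMA_OWN) and the payload start is shifted by NET_IP_ALIGN
 * so the IP header lands on a naturally aligned address. Called with
 * priv->lock held when refilling from the receive path.
 */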
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
		DMA_FROM_DEVICE);
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(ch->skb[ch->dma.desc]->data);
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		MAX_DMA_DATA_LEN;
	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
	return 0;
}

static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			"failed to allocate new rx buffer, stopping DMA\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	skb_put(skb, len);
	skb->dev = ch->netdev;
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}

static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
				struct ltq_etop_chan, napi);
	int rx = 0;
	int complete = 0;

	while ((rx < budget) && !complete) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ltq_etop_hw_receive(ch);
			rx++;
		} else {
			complete = 1;
		}
	}
	if (complete || !rx) {
		napi_complete(&ch->napi);
		ltq_dma_ack_irq(&ch->dma);
	}
	return rx;
}

static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	while ((ch->dma.desc_base[ch->tx_free].ctl &
			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
			sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);
	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}

static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
	struct ltq_etop_priv *priv = _priv;
	int ch = irq - LTQ_DMA_CH0_INT;

	napi_schedule(&priv->ch[ch].napi);
	return IRQ_HANDLED;
}

static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		int desc;

		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
			dev_kfree_skb_any(ch->skb[desc]);
	}
}

static void
ltq_etop_hw_exit(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_disable(PMU_PPE);
	for (i = 0; i < MAX_DMA_CHAN; i++)
		if (IS_TX(i) || IS_RX(i))
			ltq_etop_free_channel(dev, &priv->ch[i]);
}
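
/*
 * Bring the ETOP block up: power on the PPE via the PMU, select the MII
 * mode requested by the platform data, enable hardware CRC generation
 * and set up the two static DMA channels (channel 1 for TX, channel 6
 * for RX). Every RX descriptor is pre-loaded with a fresh skb so the
 * engine can start receiving as soon as the channel is opened.
 */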
static int
ltq_etop_hw_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_enable(PMU_PPE);

	switch (priv->pldata->mii_mode) {
	case PHY_INTERFACE_MODE_RMII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
		break;

	case PHY_INTERFACE_MODE_MII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
		break;

	default:
		netdev_err(dev, "unknown mii mode %d\n",
			priv->pldata->mii_mode);
		return -ENOTSUPP;
	}

	/* enable crc generation */
	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);

	ltq_dma_init_port(DMA_PORT_ETOP);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->idx = ch->dma.nr = i;

		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
				"etop_tx", priv);
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
					ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;
			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
				"etop_rx", priv);
		}
		ch->dma.irq = irq;
	}
	return 0;
}

static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "Lantiq ETOP");
	strcpy(info->bus_info, "internal");
	strcpy(info->version, DRV_VERSION);
}

static int
ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int
ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int
ltq_etop_nway_reset(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	return phy_start_aneg(priv->phydev);
}

static const struct ethtool_ops ltq_etop_ethtool_ops = {
	.get_drvinfo = ltq_etop_get_drvinfo,
	.get_settings = ltq_etop_get_settings,
	.set_settings = ltq_etop_set_settings,
	.nway_reset = ltq_etop_nway_reset,
};

static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
	u32 val = MDIO_REQUEST |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
		phy_data;

	/* wait for any pending MDIO transaction to finish */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	return 0;
}

static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	u32 val = MDIO_REQUEST | MDIO_READ |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);

	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	/* the read data is valid once the request bit clears again */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
	return val;
}

static void
ltq_etop_mdio_link(struct net_device *dev)
{
	/* nothing to do */
}
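
/*
 * Find the first PHY that answered during bus registration, connect it
 * to the MAC and restrict the supported/advertised modes to what this
 * 10/100 block can actually do.
 */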
static int
ltq_etop_mdio_probe(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (priv->mii_bus->phy_map[phy_addr]) {
			phydev = priv->mii_bus->phy_map[phy_addr];
			break;
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
			0, priv->pldata->mii_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
		dev->name, phydev->drv->name,
		dev_name(&phydev->dev), phydev->irq);

	return 0;
}

static int
ltq_etop_mdio_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;
	int err;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		netdev_err(dev, "failed to allocate mii bus\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->mii_bus->priv = dev;
	priv->mii_bus->read = ltq_etop_mdio_rd;
	priv->mii_bus->write = ltq_etop_mdio_wr;
	priv->mii_bus->name = "ltq_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; ++i)
		priv->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(priv->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}

	if (ltq_etop_mdio_probe(dev)) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(priv->mii_bus);
err_out_free_mdio_irq:
	kfree(priv->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(priv->mii_bus);
err_out:
	return err;
}

static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	phy_disconnect(priv->phydev);
	mdiobus_unregister(priv->mii_bus);
	kfree(priv->mii_bus->irq);
	mdiobus_free(priv->mii_bus);
}

static int
ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_TX(i) && !IS_RX(i))
			continue;
		ltq_dma_open(&ch->dma);
		napi_enable(&ch->napi);
	}
	phy_start(priv->phydev);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int
ltq_etop_stop(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_RX(i) && !IS_TX(i))
			continue;
		napi_disable(&ch->napi);
		ltq_dma_close(&ch->dma);
	}
	return 0;
}
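
/*
 * Transmit path: short frames are padded up to ETH_ZLEN, the descriptor
 * must be free (neither owned by the DMA engine nor still holding an
 * skb), and since the engine wants 16 byte aligned start addresses the
 * mapped address is rounded down with the remainder passed as a byte
 * offset in the descriptor control word. The queue is stopped as soon
 * as the next descriptor is still owned by the hardware.
 */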
static int
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len;
	unsigned long flags;
	u32 byte_offset;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		dev_kfree_skb_any(skb);
		netdev_err(dev, "tx ring full\n");
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = CPHYSADDR(skb->data) % 16;
	ch->skb[ch->dma.desc] = skb;

	dev->trans_start = jiffies;

	spin_lock_irqsave(&priv->lock, flags);
	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
						DMA_TO_DEVICE)) - byte_offset;
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}

static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret = eth_change_mtu(dev, new_mtu);

	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
			LTQ_ETOP_IGPLEN);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}

static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	/* TODO: mii-tool reports "No MII transceiver present!" ?! */
	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/* store the mac for the unicast filter */
		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
		ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
			LTQ_ETOP_MAC_DA1);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}

static void
ltq_etop_set_multicast_list(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* ensure that the unicast filter is not enabled in promiscuous mode */
	spin_lock_irqsave(&priv->lock, flags);
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
		ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
	else
		ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* we are currently only using the first queue */
	return 0;
}
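
/*
 * .ndo_init callback, run from register_netdev(): initializes the
 * hardware, applies the default MTU, programs the MAC address from the
 * platform data (falling back to a random one if it is invalid) and
 * brings up the MDIO bus.
 */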
static int
ltq_etop_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct sockaddr mac;
	int err;

	ether_setup(dev);
	dev->watchdog_timeo = 10 * HZ;
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	ltq_etop_change_mtu(dev, 1500);

	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
	if (!is_valid_ether_addr(mac.sa_data)) {
		pr_warn("etop: invalid MAC, using random\n");
		random_ether_addr(mac.sa_data);
	}

	err = ltq_etop_set_mac_address(dev, &mac);
	if (err)
		goto err_netdev;
	ltq_etop_set_multicast_list(dev);
	err = ltq_etop_mdio_init(dev);
	if (err)
		goto err_netdev;
	return 0;

err_netdev:
	unregister_netdev(dev);
	free_netdev(dev);
err_hw:
	ltq_etop_hw_exit(dev);
	return err;
}

static void
ltq_etop_tx_timeout(struct net_device *dev)
{
	int err;

	ltq_etop_hw_exit(dev);
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
	return;

err_hw:
	ltq_etop_hw_exit(dev);
	netdev_err(dev, "failed to restart etop after TX timeout\n");
}

static const struct net_device_ops ltq_eth_netdev_ops = {
	.ndo_open = ltq_etop_open,
	.ndo_stop = ltq_etop_stop,
	.ndo_start_xmit = ltq_etop_tx,
	.ndo_change_mtu = ltq_etop_change_mtu,
	.ndo_do_ioctl = ltq_etop_ioctl,
	.ndo_set_mac_address = ltq_etop_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
	.ndo_select_queue = ltq_etop_select_queue,
	.ndo_init = ltq_etop_init,
	.ndo_tx_timeout = ltq_etop_tx_timeout,
};

static int __init
ltq_etop_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ltq_etop_priv *priv;
	struct resource *res;
	int err;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get etop resource\n");
		err = -ENOENT;
		goto err_out;
	}

	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request etop resource\n");
		err = -EBUSY;
		goto err_out;
	}

	ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
		res->start, resource_size(res));
	if (!ltq_etop_membase) {
		dev_err(&pdev->dev, "failed to remap etop engine %d\n",
			pdev->id);
		err = -ENOMEM;
		goto err_out;
	}

	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	strcpy(dev->name, "eth%d");
	dev->netdev_ops = &ltq_eth_netdev_ops;
	dev->ethtool_ops = &ltq_etop_ethtool_ops;
	priv = netdev_priv(dev);
	priv->res = res;
	priv->pldata = dev_get_platdata(&pdev->dev);
	priv->netdev = dev;
	spin_lock_init(&priv->lock);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		if (IS_TX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_tx, 8);
		else if (IS_RX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_rx, 32);
		priv->ch[i].netdev = dev;
	}

	err = register_netdev(dev);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, dev);
	return 0;

err_free:
	free_netdev(dev);
err_out:
	return err;
}

static int __devexit
ltq_etop_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		netif_tx_stop_all_queues(dev);
		ltq_etop_hw_exit(dev);
		ltq_etop_mdio_cleanup(dev);
		unregister_netdev(dev);
	}
	return 0;
}

static struct platform_driver ltq_mii_driver = {
	.remove = __devexit_p(ltq_etop_remove),
	.driver = {
		.name = "ltq_etop",
		.owner = THIS_MODULE,
	},
};

int __init
init_ltq_etop(void)
{
	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);

	if (ret)
		pr_err("ltq_etop: Error registering platform driver!\n");
	return ret;
}

static void __exit
exit_ltq_etop(void)
{
	platform_driver_unregister(&ltq_mii_driver);
}

module_init(init_ltq_etop);
module_exit(exit_ltq_etop);

MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC ETOP");
MODULE_LICENSE("GPL");