1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Dave DNET Ethernet Controller driver 4 * 5 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu> 6 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com> 7 */ 8 #include <linux/io.h> 9 #include <linux/module.h> 10 #include <linux/moduleparam.h> 11 #include <linux/kernel.h> 12 #include <linux/types.h> 13 #include <linux/slab.h> 14 #include <linux/delay.h> 15 #include <linux/interrupt.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/platform_device.h> 20 #include <linux/phy.h> 21 22 #include "dnet.h" 23 24 #undef DEBUG 25 26 /* function for reading internal MAC register */ 27 static u16 dnet_readw_mac(struct dnet *bp, u16 reg) 28 { 29 u16 data_read; 30 31 /* issue a read */ 32 dnet_writel(bp, reg, MACREG_ADDR); 33 34 /* since a read/write op to the MAC is very slow, 35 * we must wait before reading the data */ 36 ndelay(500); 37 38 /* read data read from the MAC register */ 39 data_read = dnet_readl(bp, MACREG_DATA); 40 41 /* all done */ 42 return data_read; 43 } 44 45 /* function for writing internal MAC register */ 46 static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) 47 { 48 /* load data to write */ 49 dnet_writel(bp, val, MACREG_DATA); 50 51 /* issue a write */ 52 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); 53 54 /* since a read/write op to the MAC is very slow, 55 * we must wait before exiting */ 56 ndelay(500); 57 } 58 59 static void __dnet_set_hwaddr(struct dnet *bp) 60 { 61 u16 tmp; 62 63 tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); 64 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); 65 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); 66 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); 67 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); 68 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); 69 } 70 71 static void dnet_get_hwaddr(struct dnet *bp) 72 { 73 u16 tmp; 74 u8 addr[6]; 75 76 /* 77 * from MAC docs: 78 * "Note that the MAC address is stored in the registers in Hexadecimal 79 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 80 * would require writing 0xAC (octet 0) to address 0x0B (high byte of 81 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of 82 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of 83 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of 84 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of 85 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of 86 * Mac_addr[15:0]). 87 */ 88 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); 89 *((__be16 *)addr) = cpu_to_be16(tmp); 90 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); 91 *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); 92 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); 93 *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); 94 95 if (is_valid_ether_addr(addr)) 96 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 97 } 98 99 static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 100 { 101 struct dnet *bp = bus->priv; 102 u16 value; 103 104 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 105 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 106 cpu_relax(); 107 108 /* only 5 bits allowed for phy-addr and reg_offset */ 109 mii_id &= 0x1f; 110 regnum &= 0x1f; 111 112 /* prepare reg_value for a read */ 113 value = (mii_id << 8); 114 value |= regnum; 115 116 /* write control word */ 117 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); 118 119 /* wait for end of transfer */ 120 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 121 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 122 cpu_relax(); 123 124 value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); 125 126 pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); 127 128 return value; 129 } 130 131 static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 132 u16 value) 133 { 134 struct dnet *bp = bus->priv; 135 u16 tmp; 136 137 pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); 138 139 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 140 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 141 cpu_relax(); 142 143 /* prepare for a write operation */ 144 tmp = (1 << 13); 145 146 /* only 5 bits allowed for phy-addr and reg_offset */ 147 mii_id &= 0x1f; 148 regnum &= 0x1f; 149 150 /* only 16 bits on data */ 151 value &= 0xffff; 152 153 /* prepare reg_value for a write */ 154 tmp |= (mii_id << 8); 155 tmp |= regnum; 156 157 /* write data to write first */ 158 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); 159 160 /* write control word */ 161 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); 162 163 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 164 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 165 cpu_relax(); 166 167 return 0; 168 } 169 170 static void dnet_handle_link_change(struct net_device *dev) 171 { 172 struct dnet *bp = netdev_priv(dev); 173 struct phy_device *phydev = dev->phydev; 174 unsigned long flags; 175 u32 mode_reg, ctl_reg; 176 177 int status_change = 0; 178 179 spin_lock_irqsave(&bp->lock, flags); 180 181 mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); 182 ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); 183 184 if (phydev->link) { 185 if (bp->duplex != phydev->duplex) { 186 if (phydev->duplex) 187 ctl_reg &= 188 ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); 189 else 190 ctl_reg |= 191 DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; 192 193 bp->duplex = phydev->duplex; 194 status_change = 1; 195 } 196 197 if (bp->speed != phydev->speed) { 198 status_change = 1; 199 switch (phydev->speed) { 200 case 1000: 201 mode_reg |= DNET_INTERNAL_MODE_GBITEN; 202 break; 203 case 100: 204 case 10: 205 mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; 206 break; 207 default: 208 printk(KERN_WARNING 209 "%s: Ack! Speed (%d) is not " 210 "10/100/1000!\n", dev->name, 211 phydev->speed); 212 break; 213 } 214 bp->speed = phydev->speed; 215 } 216 } 217 218 if (phydev->link != bp->link) { 219 if (phydev->link) { 220 mode_reg |= 221 (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); 222 } else { 223 mode_reg &= 224 ~(DNET_INTERNAL_MODE_RXEN | 225 DNET_INTERNAL_MODE_TXEN); 226 bp->speed = 0; 227 bp->duplex = -1; 228 } 229 bp->link = phydev->link; 230 231 status_change = 1; 232 } 233 234 if (status_change) { 235 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); 236 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); 237 } 238 239 spin_unlock_irqrestore(&bp->lock, flags); 240 241 if (status_change) { 242 if (phydev->link) 243 printk(KERN_INFO "%s: link up (%d/%s)\n", 244 dev->name, phydev->speed, 245 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 246 else 247 printk(KERN_INFO "%s: link down\n", dev->name); 248 } 249 } 250 251 static int dnet_mii_probe(struct net_device *dev) 252 { 253 struct dnet *bp = netdev_priv(dev); 254 struct phy_device *phydev = NULL; 255 256 /* find the first phy */ 257 phydev = phy_find_first(bp->mii_bus); 258 259 if (!phydev) { 260 printk(KERN_ERR "%s: no PHY found\n", dev->name); 261 return -ENODEV; 262 } 263 264 /* TODO : add pin_irq */ 265 266 /* attach the mac to the phy */ 267 if (bp->capabilities & DNET_HAS_RMII) { 268 phydev = phy_connect(dev, phydev_name(phydev), 269 &dnet_handle_link_change, 270 PHY_INTERFACE_MODE_RMII); 271 } else { 272 phydev = phy_connect(dev, phydev_name(phydev), 273 &dnet_handle_link_change, 274 PHY_INTERFACE_MODE_MII); 275 } 276 277 if (IS_ERR(phydev)) { 278 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 279 return PTR_ERR(phydev); 280 } 281 282 /* mask with MAC supported features */ 283 if (bp->capabilities & DNET_HAS_GIGABIT) 284 phy_set_max_speed(phydev, SPEED_1000); 285 else 286 phy_set_max_speed(phydev, SPEED_100); 287 288 phy_support_asym_pause(phydev); 289 290 bp->link = 0; 291 bp->speed = 0; 292 bp->duplex = -1; 293 294 return 0; 295 } 296 297 static int dnet_mii_init(struct dnet *bp) 298 { 299 int err; 300 301 bp->mii_bus = mdiobus_alloc(); 302 if (bp->mii_bus == NULL) 303 return -ENOMEM; 304 305 bp->mii_bus->name = "dnet_mii_bus"; 306 bp->mii_bus->read = &dnet_mdio_read; 307 bp->mii_bus->write = &dnet_mdio_write; 308 309 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 310 bp->pdev->name, bp->pdev->id); 311 312 bp->mii_bus->priv = bp; 313 314 if (mdiobus_register(bp->mii_bus)) { 315 err = -ENXIO; 316 goto err_out; 317 } 318 319 if (dnet_mii_probe(bp->dev) != 0) { 320 err = -ENXIO; 321 goto err_out_unregister_bus; 322 } 323 324 return 0; 325 326 err_out_unregister_bus: 327 mdiobus_unregister(bp->mii_bus); 328 err_out: 329 mdiobus_free(bp->mii_bus); 330 return err; 331 } 332 333 /* For Neptune board: LINK1000 as Link LED and TX as activity LED */ 334 static int dnet_phy_marvell_fixup(struct phy_device *phydev) 335 { 336 return phy_write(phydev, 0x18, 0x4148); 337 } 338 339 static void dnet_update_stats(struct dnet *bp) 340 { 341 u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; 342 u32 *p = &bp->hw_stats.rx_pkt_ignr; 343 u32 *end = &bp->hw_stats.rx_byte + 1; 344 345 WARN_ON((unsigned long)(end - p - 1) != 346 (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4); 347 348 for (; p < end; p++, reg++) 349 *p += readl(reg); 350 351 reg = bp->regs + DNET_TX_UNICAST_CNT; 352 p = &bp->hw_stats.tx_unicast; 353 end = &bp->hw_stats.tx_byte + 1; 354 355 WARN_ON((unsigned long)(end - p - 1) != 356 (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); 357 358 for (; p < end; p++, reg++) 359 *p += readl(reg); 360 } 361 362 static int dnet_poll(struct napi_struct *napi, int budget) 363 { 364 struct dnet *bp = container_of(napi, struct dnet, napi); 365 struct net_device *dev = bp->dev; 366 int npackets = 0; 367 unsigned int pkt_len; 368 struct sk_buff *skb; 369 unsigned int *data_ptr; 370 u32 int_enable; 371 u32 cmd_word; 372 int i; 373 374 while (npackets < budget) { 375 /* 376 * break out of while loop if there are no more 377 * packets waiting 378 */ 379 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) 380 break; 381 382 cmd_word = dnet_readl(bp, RX_LEN_FIFO); 383 pkt_len = cmd_word & 0xFFFF; 384 385 if (cmd_word & 0xDF180000) 386 printk(KERN_ERR "%s packet receive error %x\n", 387 __func__, cmd_word); 388 389 skb = netdev_alloc_skb(dev, pkt_len + 5); 390 if (skb != NULL) { 391 /* Align IP on 16 byte boundaries */ 392 skb_reserve(skb, 2); 393 /* 394 * 'skb_put()' points to the start of sk_buff 395 * data area. 396 */ 397 data_ptr = skb_put(skb, pkt_len); 398 for (i = 0; i < (pkt_len + 3) >> 2; i++) 399 *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); 400 skb->protocol = eth_type_trans(skb, dev); 401 netif_receive_skb(skb); 402 npackets++; 403 } else 404 printk(KERN_NOTICE 405 "%s: No memory to allocate a sk_buff of " 406 "size %u.\n", dev->name, pkt_len); 407 } 408 409 if (npackets < budget) { 410 /* We processed all packets available. Tell NAPI it can 411 * stop polling then re-enable rx interrupts. 412 */ 413 napi_complete_done(napi, npackets); 414 int_enable = dnet_readl(bp, INTR_ENB); 415 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 416 dnet_writel(bp, int_enable, INTR_ENB); 417 } 418 419 return npackets; 420 } 421 422 static irqreturn_t dnet_interrupt(int irq, void *dev_id) 423 { 424 struct net_device *dev = dev_id; 425 struct dnet *bp = netdev_priv(dev); 426 u32 int_src, int_enable, int_current; 427 unsigned long flags; 428 unsigned int handled = 0; 429 430 spin_lock_irqsave(&bp->lock, flags); 431 432 /* read and clear the DNET irq (clear on read) */ 433 int_src = dnet_readl(bp, INTR_SRC); 434 int_enable = dnet_readl(bp, INTR_ENB); 435 int_current = int_src & int_enable; 436 437 /* restart the queue if we had stopped it for TX fifo almost full */ 438 if (int_current & DNET_INTR_SRC_TX_FIFOAE) { 439 int_enable = dnet_readl(bp, INTR_ENB); 440 int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; 441 dnet_writel(bp, int_enable, INTR_ENB); 442 netif_wake_queue(dev); 443 handled = 1; 444 } 445 446 /* RX FIFO error checking */ 447 if (int_current & 448 (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { 449 printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__, 450 dnet_readl(bp, RX_STATUS), int_current); 451 /* we can only flush the RX FIFOs */ 452 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); 453 ndelay(500); 454 dnet_writel(bp, 0, SYS_CTL); 455 handled = 1; 456 } 457 458 /* TX FIFO error checking */ 459 if (int_current & 460 (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { 461 printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, 462 dnet_readl(bp, TX_STATUS), int_current); 463 /* we can only flush the TX FIFOs */ 464 dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); 465 ndelay(500); 466 dnet_writel(bp, 0, SYS_CTL); 467 handled = 1; 468 } 469 470 if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { 471 if (napi_schedule_prep(&bp->napi)) { 472 /* 473 * There's no point taking any more interrupts 474 * until we have processed the buffers 475 */ 476 /* Disable Rx interrupts and schedule NAPI poll */ 477 int_enable = dnet_readl(bp, INTR_ENB); 478 int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; 479 dnet_writel(bp, int_enable, INTR_ENB); 480 __napi_schedule(&bp->napi); 481 } 482 handled = 1; 483 } 484 485 if (!handled) 486 pr_debug("%s: irq %x remains\n", __func__, int_current); 487 488 spin_unlock_irqrestore(&bp->lock, flags); 489 490 return IRQ_RETVAL(handled); 491 } 492 493 #ifdef DEBUG 494 static inline void dnet_print_skb(struct sk_buff *skb) 495 { 496 int k; 497 printk(KERN_DEBUG PFX "data:"); 498 for (k = 0; k < skb->len; k++) 499 printk(" %02x", (unsigned int)skb->data[k]); 500 printk("\n"); 501 } 502 #else 503 #define dnet_print_skb(skb) do {} while (0) 504 #endif 505 506 static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) 507 { 508 509 struct dnet *bp = netdev_priv(dev); 510 unsigned int i, tx_cmd, wrsz; 511 unsigned long flags; 512 unsigned int *bufp; 513 u32 irq_enable; 514 515 dnet_readl(bp, TX_STATUS); 516 517 pr_debug("start_xmit: len %u head %p data %p\n", 518 skb->len, skb->head, skb->data); 519 dnet_print_skb(skb); 520 521 spin_lock_irqsave(&bp->lock, flags); 522 523 dnet_readl(bp, TX_STATUS); 524 525 bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL); 526 wrsz = (u32) skb->len + 3; 527 wrsz += ((unsigned long) skb->data) & 0x3; 528 wrsz >>= 2; 529 tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len; 530 531 /* check if there is enough room for the current frame */ 532 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { 533 for (i = 0; i < wrsz; i++) 534 dnet_writel(bp, *bufp++, TX_DATA_FIFO); 535 536 /* 537 * inform MAC that a packet's written and ready to be 538 * shipped out 539 */ 540 dnet_writel(bp, tx_cmd, TX_LEN_FIFO); 541 } 542 543 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { 544 netif_stop_queue(dev); 545 dnet_readl(bp, INTR_SRC); 546 irq_enable = dnet_readl(bp, INTR_ENB); 547 irq_enable |= DNET_INTR_ENB_TX_FIFOAE; 548 dnet_writel(bp, irq_enable, INTR_ENB); 549 } 550 551 skb_tx_timestamp(skb); 552 553 /* free the buffer */ 554 dev_kfree_skb(skb); 555 556 spin_unlock_irqrestore(&bp->lock, flags); 557 558 return NETDEV_TX_OK; 559 } 560 561 static void dnet_reset_hw(struct dnet *bp) 562 { 563 /* put ts_mac in IDLE state i.e. disable rx/tx */ 564 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); 565 566 /* 567 * RX FIFO almost full threshold: only cmd FIFO almost full is 568 * implemented for RX side 569 */ 570 dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); 571 /* 572 * TX FIFO almost empty threshold: only data FIFO almost empty 573 * is implemented for TX side 574 */ 575 dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); 576 577 /* flush rx/tx fifos */ 578 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, 579 SYS_CTL); 580 msleep(1); 581 dnet_writel(bp, 0, SYS_CTL); 582 } 583 584 static void dnet_init_hw(struct dnet *bp) 585 { 586 u32 config; 587 588 dnet_reset_hw(bp); 589 __dnet_set_hwaddr(bp); 590 591 config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); 592 593 if (bp->dev->flags & IFF_PROMISC) 594 /* Copy All Frames */ 595 config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; 596 if (!(bp->dev->flags & IFF_BROADCAST)) 597 /* No BroadCast */ 598 config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; 599 600 config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | 601 DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | 602 DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | 603 DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; 604 605 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); 606 607 /* clear irq before enabling them */ 608 config = dnet_readl(bp, INTR_SRC); 609 610 /* enable RX/TX interrupt, recv packet ready interrupt */ 611 dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | 612 DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | 613 DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | 614 DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | 615 DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); 616 } 617 618 static int dnet_open(struct net_device *dev) 619 { 620 struct dnet *bp = netdev_priv(dev); 621 622 /* if the phy is not yet register, retry later */ 623 if (!dev->phydev) 624 return -EAGAIN; 625 626 napi_enable(&bp->napi); 627 dnet_init_hw(bp); 628 629 phy_start_aneg(dev->phydev); 630 631 /* schedule a link state check */ 632 phy_start(dev->phydev); 633 634 netif_start_queue(dev); 635 636 return 0; 637 } 638 639 static int dnet_close(struct net_device *dev) 640 { 641 struct dnet *bp = netdev_priv(dev); 642 643 netif_stop_queue(dev); 644 napi_disable(&bp->napi); 645 646 if (dev->phydev) 647 phy_stop(dev->phydev); 648 649 dnet_reset_hw(bp); 650 netif_carrier_off(dev); 651 652 return 0; 653 } 654 655 static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) 656 { 657 pr_debug("%s\n", __func__); 658 pr_debug("----------------------------- RX statistics " 659 "-------------------------------\n"); 660 pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); 661 pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); 662 pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); 663 pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); 664 pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); 665 pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); 666 pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); 667 pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); 668 pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); 669 pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); 670 pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); 671 pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); 672 pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); 673 pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); 674 pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); 675 pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); 676 pr_debug("----------------------------- TX statistics " 677 "-------------------------------\n"); 678 pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); 679 pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); 680 pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); 681 pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); 682 pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); 683 pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); 684 pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); 685 pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); 686 } 687 688 static struct net_device_stats *dnet_get_stats(struct net_device *dev) 689 { 690 691 struct dnet *bp = netdev_priv(dev); 692 struct net_device_stats *nstat = &dev->stats; 693 struct dnet_stats *hwstat = &bp->hw_stats; 694 695 /* read stats from hardware */ 696 dnet_update_stats(bp); 697 698 /* Convert HW stats into netdevice stats */ 699 nstat->rx_errors = (hwstat->rx_len_chk_err + 700 hwstat->rx_lng_frm + hwstat->rx_shrt_frm + 701 /* ignore IGP violation error 702 hwstat->rx_ipg_viol + */ 703 hwstat->rx_crc_err + 704 hwstat->rx_pre_shrink + 705 hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); 706 nstat->tx_errors = hwstat->tx_bad_fcs; 707 nstat->rx_length_errors = (hwstat->rx_len_chk_err + 708 hwstat->rx_lng_frm + 709 hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); 710 nstat->rx_crc_errors = hwstat->rx_crc_err; 711 nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; 712 nstat->rx_packets = hwstat->rx_ok_pkt; 713 nstat->tx_packets = (hwstat->tx_unicast + 714 hwstat->tx_multicast + hwstat->tx_brdcast); 715 nstat->rx_bytes = hwstat->rx_byte; 716 nstat->tx_bytes = hwstat->tx_byte; 717 nstat->multicast = hwstat->rx_multicast; 718 nstat->rx_missed_errors = hwstat->rx_pkt_ignr; 719 720 dnet_print_pretty_hwstats(hwstat); 721 722 return nstat; 723 } 724 725 static void dnet_get_drvinfo(struct net_device *dev, 726 struct ethtool_drvinfo *info) 727 { 728 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 729 strlcpy(info->bus_info, "0", sizeof(info->bus_info)); 730 } 731 732 static const struct ethtool_ops dnet_ethtool_ops = { 733 .get_drvinfo = dnet_get_drvinfo, 734 .get_link = ethtool_op_get_link, 735 .get_ts_info = ethtool_op_get_ts_info, 736 .get_link_ksettings = phy_ethtool_get_link_ksettings, 737 .set_link_ksettings = phy_ethtool_set_link_ksettings, 738 }; 739 740 static const struct net_device_ops dnet_netdev_ops = { 741 .ndo_open = dnet_open, 742 .ndo_stop = dnet_close, 743 .ndo_get_stats = dnet_get_stats, 744 .ndo_start_xmit = dnet_start_xmit, 745 .ndo_do_ioctl = phy_do_ioctl_running, 746 .ndo_set_mac_address = eth_mac_addr, 747 .ndo_validate_addr = eth_validate_addr, 748 }; 749 750 static int dnet_probe(struct platform_device *pdev) 751 { 752 struct resource *res; 753 struct net_device *dev; 754 struct dnet *bp; 755 struct phy_device *phydev; 756 int err; 757 unsigned int irq; 758 759 irq = platform_get_irq(pdev, 0); 760 761 dev = alloc_etherdev(sizeof(*bp)); 762 if (!dev) 763 return -ENOMEM; 764 765 /* TODO: Actually, we have some interesting features... */ 766 dev->features |= 0; 767 768 bp = netdev_priv(dev); 769 bp->dev = dev; 770 771 platform_set_drvdata(pdev, dev); 772 SET_NETDEV_DEV(dev, &pdev->dev); 773 774 spin_lock_init(&bp->lock); 775 776 bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 777 if (IS_ERR(bp->regs)) { 778 err = PTR_ERR(bp->regs); 779 goto err_out_free_dev; 780 } 781 782 dev->irq = irq; 783 err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); 784 if (err) { 785 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", 786 irq, err); 787 goto err_out_free_dev; 788 } 789 790 dev->netdev_ops = &dnet_netdev_ops; 791 netif_napi_add(dev, &bp->napi, dnet_poll, 64); 792 dev->ethtool_ops = &dnet_ethtool_ops; 793 794 dev->base_addr = (unsigned long)bp->regs; 795 796 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; 797 798 dnet_get_hwaddr(bp); 799 800 if (!is_valid_ether_addr(dev->dev_addr)) { 801 /* choose a random ethernet address */ 802 eth_hw_addr_random(dev); 803 __dnet_set_hwaddr(bp); 804 } 805 806 err = register_netdev(dev); 807 if (err) { 808 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 809 goto err_out_free_irq; 810 } 811 812 /* register the PHY board fixup (for Marvell 88E1111) */ 813 err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, 814 dnet_phy_marvell_fixup); 815 /* we can live without it, so just issue a warning */ 816 if (err) 817 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); 818 819 err = dnet_mii_init(bp); 820 if (err) 821 goto err_out_unregister_netdev; 822 823 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", 824 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); 825 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", 826 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", 827 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 828 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 829 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); 830 phydev = dev->phydev; 831 phy_attached_info(phydev); 832 833 return 0; 834 835 err_out_unregister_netdev: 836 unregister_netdev(dev); 837 err_out_free_irq: 838 free_irq(dev->irq, dev); 839 err_out_free_dev: 840 free_netdev(dev); 841 return err; 842 } 843 844 static int dnet_remove(struct platform_device *pdev) 845 { 846 847 struct net_device *dev; 848 struct dnet *bp; 849 850 dev = platform_get_drvdata(pdev); 851 852 if (dev) { 853 bp = netdev_priv(dev); 854 if (dev->phydev) 855 phy_disconnect(dev->phydev); 856 mdiobus_unregister(bp->mii_bus); 857 mdiobus_free(bp->mii_bus); 858 unregister_netdev(dev); 859 free_irq(dev->irq, dev); 860 free_netdev(dev); 861 } 862 863 return 0; 864 } 865 866 static struct platform_driver dnet_driver = { 867 .probe = dnet_probe, 868 .remove = dnet_remove, 869 .driver = { 870 .name = "dnet", 871 }, 872 }; 873 874 module_platform_driver(dnet_driver); 875 876 MODULE_LICENSE("GPL"); 877 MODULE_DESCRIPTION("Dave DNET Ethernet driver"); 878 MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, " 879 "Matteo Vit <matteo.vit@dave.eu>"); 880