/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* A DMA buffer descriptor may have a different size
 * depending on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value,
		       bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is in big endian mode we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: New link speed, in Mbps, used to derive the clock rate
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ?
PHY_POLL : phy_irq; 499 } 500 } else { 501 phydev->irq = PHY_POLL; 502 } 503 } 504 505 /* attach the mac to the phy */ 506 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 507 bp->phy_interface); 508 if (ret) { 509 netdev_err(dev, "Could not attach to PHY\n"); 510 return ret; 511 } 512 } 513 514 /* mask with MAC supported features */ 515 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 516 phydev->supported &= PHY_GBIT_FEATURES; 517 else 518 phydev->supported &= PHY_BASIC_FEATURES; 519 520 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 521 phydev->supported &= ~SUPPORTED_1000baseT_Half; 522 523 phydev->advertising = phydev->supported; 524 525 bp->link = 0; 526 bp->speed = 0; 527 bp->duplex = -1; 528 529 return 0; 530 } 531 532 static int macb_mii_init(struct macb *bp) 533 { 534 struct macb_platform_data *pdata; 535 struct device_node *np; 536 int err = -ENXIO, i; 537 538 /* Enable management port */ 539 macb_writel(bp, NCR, MACB_BIT(MPE)); 540 541 bp->mii_bus = mdiobus_alloc(); 542 if (!bp->mii_bus) { 543 err = -ENOMEM; 544 goto err_out; 545 } 546 547 bp->mii_bus->name = "MACB_mii_bus"; 548 bp->mii_bus->read = &macb_mdio_read; 549 bp->mii_bus->write = &macb_mdio_write; 550 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 551 bp->pdev->name, bp->pdev->id); 552 bp->mii_bus->priv = bp; 553 bp->mii_bus->parent = &bp->pdev->dev; 554 pdata = dev_get_platdata(&bp->pdev->dev); 555 556 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 557 558 np = bp->pdev->dev.of_node; 559 if (np) { 560 if (of_phy_is_fixed_link(np)) { 561 if (of_phy_register_fixed_link(np) < 0) { 562 dev_err(&bp->pdev->dev, 563 "broken fixed-link specification\n"); 564 goto err_out_unregister_bus; 565 } 566 bp->phy_node = of_node_get(np); 567 568 err = mdiobus_register(bp->mii_bus); 569 } else { 570 /* try dt phy registration */ 571 err = of_mdiobus_register(bp->mii_bus, np); 572 573 /* fallback to standard phy registration if no phy were 574 * found during dt phy registration 575 */ 576 if (!err && !phy_find_first(bp->mii_bus)) { 577 for (i = 0; i < PHY_MAX_ADDR; i++) { 578 struct phy_device *phydev; 579 580 phydev = mdiobus_scan(bp->mii_bus, i); 581 if (IS_ERR(phydev) && 582 PTR_ERR(phydev) != -ENODEV) { 583 err = PTR_ERR(phydev); 584 break; 585 } 586 } 587 588 if (err) 589 goto err_out_unregister_bus; 590 } 591 } 592 } else { 593 for (i = 0; i < PHY_MAX_ADDR; i++) 594 bp->mii_bus->irq[i] = PHY_POLL; 595 596 if (pdata) 597 bp->mii_bus->phy_mask = pdata->phy_mask; 598 599 err = mdiobus_register(bp->mii_bus); 600 } 601 602 if (err) 603 goto err_out_free_mdiobus; 604 605 err = macb_mii_probe(bp->dev); 606 if (err) 607 goto err_out_unregister_bus; 608 609 return 0; 610 611 err_out_unregister_bus: 612 mdiobus_unregister(bp->mii_bus); 613 err_out_free_mdiobus: 614 of_node_put(bp->phy_node); 615 if (np && of_phy_is_fixed_link(np)) 616 of_phy_deregister_fixed_link(np); 617 mdiobus_free(bp->mii_bus); 618 err_out: 619 return err; 620 } 621 622 static void macb_update_stats(struct macb *bp) 623 { 624 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 625 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 626 int offset = MACB_PFR; 627 628 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 629 630 for (; p < end; p++, offset += 4) 631 *p += bp->macb_reg_readl(bp, offset); 632 } 633 634 static int macb_halt_tx(struct macb *bp) 635 { 636 unsigned long halt_time, timeout; 637 u32 status; 638 639 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 640 641 timeout = jiffies + 
usecs_to_jiffies(MACB_HALT_TIMEOUT); 642 do { 643 halt_time = jiffies; 644 status = macb_readl(bp, TSR); 645 if (!(status & MACB_BIT(TGO))) 646 return 0; 647 648 usleep_range(10, 250); 649 } while (time_before(halt_time, timeout)); 650 651 return -ETIMEDOUT; 652 } 653 654 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 655 { 656 if (tx_skb->mapping) { 657 if (tx_skb->mapped_as_page) 658 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 659 tx_skb->size, DMA_TO_DEVICE); 660 else 661 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 662 tx_skb->size, DMA_TO_DEVICE); 663 tx_skb->mapping = 0; 664 } 665 666 if (tx_skb->skb) { 667 dev_kfree_skb_any(tx_skb->skb); 668 tx_skb->skb = NULL; 669 } 670 } 671 672 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 673 { 674 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 675 struct macb_dma_desc_64 *desc_64; 676 677 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 678 desc_64 = macb_64b_desc(bp, desc); 679 desc_64->addrh = upper_32_bits(addr); 680 } 681 #endif 682 desc->addr = lower_32_bits(addr); 683 } 684 685 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 686 { 687 dma_addr_t addr = 0; 688 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 689 struct macb_dma_desc_64 *desc_64; 690 691 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 692 desc_64 = macb_64b_desc(bp, desc); 693 addr = ((u64)(desc_64->addrh) << 32); 694 } 695 #endif 696 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 697 return addr; 698 } 699 700 static void macb_tx_error_task(struct work_struct *work) 701 { 702 struct macb_queue *queue = container_of(work, struct macb_queue, 703 tx_error_task); 704 struct macb *bp = queue->bp; 705 struct macb_tx_skb *tx_skb; 706 struct macb_dma_desc *desc; 707 struct sk_buff *skb; 708 unsigned int tail; 709 unsigned long flags; 710 711 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 712 (unsigned int)(queue - bp->queues), 713 queue->tx_tail, queue->tx_head); 714 715 /* Prevent the queue IRQ handlers from running: each of them may call 716 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 717 * As explained below, we have to halt the transmission before updating 718 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 719 * network engine about the macb/gem being halted. 720 */ 721 spin_lock_irqsave(&bp->lock, flags); 722 723 /* Make sure nobody is trying to queue up new packets */ 724 netif_tx_stop_all_queues(bp->dev); 725 726 /* Stop transmission now 727 * (in case we have just queued new packets) 728 * macb/gem must be halted to write TBQP register 729 */ 730 if (macb_halt_tx(bp)) 731 /* Just complain for now, reinitializing TX path can be good */ 732 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 733 734 /* Treat frames in TX queue including the ones that caused the error. 735 * Free transmit buffers in upper layer. 
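 * (The loop below walks the ring from tx_tail to tx_head, unmapping every
 * buffer, and only counts TX statistics for frames whose first descriptor
 * was marked TX_USED by the controller.)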
736 */ 737 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 738 u32 ctrl; 739 740 desc = macb_tx_desc(queue, tail); 741 ctrl = desc->ctrl; 742 tx_skb = macb_tx_skb(queue, tail); 743 skb = tx_skb->skb; 744 745 if (ctrl & MACB_BIT(TX_USED)) { 746 /* skb is set for the last buffer of the frame */ 747 while (!skb) { 748 macb_tx_unmap(bp, tx_skb); 749 tail++; 750 tx_skb = macb_tx_skb(queue, tail); 751 skb = tx_skb->skb; 752 } 753 754 /* ctrl still refers to the first buffer descriptor 755 * since it's the only one written back by the hardware 756 */ 757 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 758 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 759 macb_tx_ring_wrap(bp, tail), 760 skb->data); 761 bp->dev->stats.tx_packets++; 762 queue->stats.tx_packets++; 763 bp->dev->stats.tx_bytes += skb->len; 764 queue->stats.tx_bytes += skb->len; 765 } 766 } else { 767 /* "Buffers exhausted mid-frame" errors may only happen 768 * if the driver is buggy, so complain loudly about 769 * those. Statistics are updated by hardware. 770 */ 771 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 772 netdev_err(bp->dev, 773 "BUG: TX buffers exhausted mid-frame\n"); 774 775 desc->ctrl = ctrl | MACB_BIT(TX_USED); 776 } 777 778 macb_tx_unmap(bp, tx_skb); 779 } 780 781 /* Set end of TX queue */ 782 desc = macb_tx_desc(queue, 0); 783 macb_set_addr(bp, desc, 0); 784 desc->ctrl = MACB_BIT(TX_USED); 785 786 /* Make descriptor updates visible to hardware */ 787 wmb(); 788 789 /* Reinitialize the TX desc queue */ 790 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 791 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 792 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 793 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 794 #endif 795 /* Make TX ring reflect state of hardware */ 796 queue->tx_head = 0; 797 queue->tx_tail = 0; 798 799 /* Housework before enabling TX IRQ */ 800 macb_writel(bp, TSR, macb_readl(bp, TSR)); 801 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 802 803 /* Now we are ready to start transmission again */ 804 netif_tx_start_all_queues(bp->dev); 805 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 806 807 spin_unlock_irqrestore(&bp->lock, flags); 808 } 809 810 static void macb_tx_interrupt(struct macb_queue *queue) 811 { 812 unsigned int tail; 813 unsigned int head; 814 u32 status; 815 struct macb *bp = queue->bp; 816 u16 queue_index = queue - bp->queues; 817 818 status = macb_readl(bp, TSR); 819 macb_writel(bp, TSR, status); 820 821 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 822 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 823 824 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 825 (unsigned long)status); 826 827 head = queue->tx_head; 828 for (tail = queue->tx_tail; tail != head; tail++) { 829 struct macb_tx_skb *tx_skb; 830 struct sk_buff *skb; 831 struct macb_dma_desc *desc; 832 u32 ctrl; 833 834 desc = macb_tx_desc(queue, tail); 835 836 /* Make hw descriptor updates visible to CPU */ 837 rmb(); 838 839 ctrl = desc->ctrl; 840 841 /* TX_USED bit is only set by hardware on the very first buffer 842 * descriptor of the transmitted frame. 
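 * Once it is seen here, every buffer up to and including the one that
 * carries the skb pointer (the last buffer of the frame) is completed
 * and unmapped by the inner loop below.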
843 */ 844 if (!(ctrl & MACB_BIT(TX_USED))) 845 break; 846 847 /* Process all buffers of the current transmitted frame */ 848 for (;; tail++) { 849 tx_skb = macb_tx_skb(queue, tail); 850 skb = tx_skb->skb; 851 852 /* First, update TX stats if needed */ 853 if (skb) { 854 if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { 855 /* skb now belongs to timestamp buffer 856 * and will be removed later 857 */ 858 tx_skb->skb = NULL; 859 } 860 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 861 macb_tx_ring_wrap(bp, tail), 862 skb->data); 863 bp->dev->stats.tx_packets++; 864 queue->stats.tx_packets++; 865 bp->dev->stats.tx_bytes += skb->len; 866 queue->stats.tx_bytes += skb->len; 867 } 868 869 /* Now we can safely release resources */ 870 macb_tx_unmap(bp, tx_skb); 871 872 /* skb is set only for the last buffer of the frame. 873 * WARNING: at this point skb has been freed by 874 * macb_tx_unmap(). 875 */ 876 if (skb) 877 break; 878 } 879 } 880 881 queue->tx_tail = tail; 882 if (__netif_subqueue_stopped(bp->dev, queue_index) && 883 CIRC_CNT(queue->tx_head, queue->tx_tail, 884 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 885 netif_wake_subqueue(bp->dev, queue_index); 886 } 887 888 static void gem_rx_refill(struct macb_queue *queue) 889 { 890 unsigned int entry; 891 struct sk_buff *skb; 892 dma_addr_t paddr; 893 struct macb *bp = queue->bp; 894 struct macb_dma_desc *desc; 895 896 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 897 bp->rx_ring_size) > 0) { 898 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 899 900 /* Make hw descriptor updates visible to CPU */ 901 rmb(); 902 903 queue->rx_prepared_head++; 904 desc = macb_rx_desc(queue, entry); 905 906 if (!queue->rx_skbuff[entry]) { 907 /* allocate sk_buff for this free entry in ring */ 908 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 909 if (unlikely(!skb)) { 910 netdev_err(bp->dev, 911 "Unable to allocate sk_buff\n"); 912 break; 913 } 914 915 /* now fill corresponding descriptor entry */ 916 paddr = dma_map_single(&bp->pdev->dev, skb->data, 917 bp->rx_buffer_size, 918 DMA_FROM_DEVICE); 919 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 920 dev_kfree_skb(skb); 921 break; 922 } 923 924 queue->rx_skbuff[entry] = skb; 925 926 if (entry == bp->rx_ring_size - 1) 927 paddr |= MACB_BIT(RX_WRAP); 928 macb_set_addr(bp, desc, paddr); 929 desc->ctrl = 0; 930 931 /* properly align Ethernet header */ 932 skb_reserve(skb, NET_IP_ALIGN); 933 } else { 934 desc->addr &= ~MACB_BIT(RX_USED); 935 desc->ctrl = 0; 936 } 937 } 938 939 /* Make descriptor updates visible to hardware */ 940 wmb(); 941 942 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 943 queue, queue->rx_prepared_head, queue->rx_tail); 944 } 945 946 /* Mark DMA descriptors from begin up to and not including end as unused */ 947 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 948 unsigned int end) 949 { 950 unsigned int frag; 951 952 for (frag = begin; frag != end; frag++) { 953 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 954 955 desc->addr &= ~MACB_BIT(RX_USED); 956 } 957 958 /* Make descriptor updates visible to hardware */ 959 wmb(); 960 961 /* When this happens, the hardware stats registers for 962 * whatever caused this is updated, so we don't have to record 963 * anything. 
964 */ 965 } 966 967 static int gem_rx(struct macb_queue *queue, int budget) 968 { 969 struct macb *bp = queue->bp; 970 unsigned int len; 971 unsigned int entry; 972 struct sk_buff *skb; 973 struct macb_dma_desc *desc; 974 int count = 0; 975 976 while (count < budget) { 977 u32 ctrl; 978 dma_addr_t addr; 979 bool rxused; 980 981 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 982 desc = macb_rx_desc(queue, entry); 983 984 /* Make hw descriptor updates visible to CPU */ 985 rmb(); 986 987 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 988 addr = macb_get_addr(bp, desc); 989 ctrl = desc->ctrl; 990 991 if (!rxused) 992 break; 993 994 queue->rx_tail++; 995 count++; 996 997 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 998 netdev_err(bp->dev, 999 "not whole frame pointed by descriptor\n"); 1000 bp->dev->stats.rx_dropped++; 1001 queue->stats.rx_dropped++; 1002 break; 1003 } 1004 skb = queue->rx_skbuff[entry]; 1005 if (unlikely(!skb)) { 1006 netdev_err(bp->dev, 1007 "inconsistent Rx descriptor chain\n"); 1008 bp->dev->stats.rx_dropped++; 1009 queue->stats.rx_dropped++; 1010 break; 1011 } 1012 /* now everything is ready for receiving packet */ 1013 queue->rx_skbuff[entry] = NULL; 1014 len = ctrl & bp->rx_frm_len_mask; 1015 1016 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1017 1018 skb_put(skb, len); 1019 dma_unmap_single(&bp->pdev->dev, addr, 1020 bp->rx_buffer_size, DMA_FROM_DEVICE); 1021 1022 skb->protocol = eth_type_trans(skb, bp->dev); 1023 skb_checksum_none_assert(skb); 1024 if (bp->dev->features & NETIF_F_RXCSUM && 1025 !(bp->dev->flags & IFF_PROMISC) && 1026 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1027 skb->ip_summed = CHECKSUM_UNNECESSARY; 1028 1029 bp->dev->stats.rx_packets++; 1030 queue->stats.rx_packets++; 1031 bp->dev->stats.rx_bytes += skb->len; 1032 queue->stats.rx_bytes += skb->len; 1033 1034 gem_ptp_do_rxstamp(bp, skb, desc); 1035 1036 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1037 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1038 skb->len, skb->csum); 1039 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1040 skb_mac_header(skb), 16, true); 1041 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1042 skb->data, 32, true); 1043 #endif 1044 1045 netif_receive_skb(skb); 1046 } 1047 1048 gem_rx_refill(queue); 1049 1050 return count; 1051 } 1052 1053 static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, 1054 unsigned int last_frag) 1055 { 1056 unsigned int len; 1057 unsigned int frag; 1058 unsigned int offset; 1059 struct sk_buff *skb; 1060 struct macb_dma_desc *desc; 1061 struct macb *bp = queue->bp; 1062 1063 desc = macb_rx_desc(queue, last_frag); 1064 len = desc->ctrl & bp->rx_frm_len_mask; 1065 1066 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1067 macb_rx_ring_wrap(bp, first_frag), 1068 macb_rx_ring_wrap(bp, last_frag), len); 1069 1070 /* The ethernet header starts NET_IP_ALIGN bytes into the 1071 * first buffer. Since the header is 14 bytes, this makes the 1072 * payload word-aligned. 1073 * 1074 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1075 * the two padding bytes into the skb so that we avoid hitting 1076 * the slowpath in memcpy(), and pull them off afterwards. 
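 * That is why the skb below is allocated with len + NET_IP_ALIGN bytes,
 * the copy loop starts at offset 0, and __skb_pull(skb, NET_IP_ALIGN)
 * drops the padding once the whole frame has been assembled.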
1077 */ 1078 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1079 if (!skb) { 1080 bp->dev->stats.rx_dropped++; 1081 for (frag = first_frag; ; frag++) { 1082 desc = macb_rx_desc(queue, frag); 1083 desc->addr &= ~MACB_BIT(RX_USED); 1084 if (frag == last_frag) 1085 break; 1086 } 1087 1088 /* Make descriptor updates visible to hardware */ 1089 wmb(); 1090 1091 return 1; 1092 } 1093 1094 offset = 0; 1095 len += NET_IP_ALIGN; 1096 skb_checksum_none_assert(skb); 1097 skb_put(skb, len); 1098 1099 for (frag = first_frag; ; frag++) { 1100 unsigned int frag_len = bp->rx_buffer_size; 1101 1102 if (offset + frag_len > len) { 1103 if (unlikely(frag != last_frag)) { 1104 dev_kfree_skb_any(skb); 1105 return -1; 1106 } 1107 frag_len = len - offset; 1108 } 1109 skb_copy_to_linear_data_offset(skb, offset, 1110 macb_rx_buffer(queue, frag), 1111 frag_len); 1112 offset += bp->rx_buffer_size; 1113 desc = macb_rx_desc(queue, frag); 1114 desc->addr &= ~MACB_BIT(RX_USED); 1115 1116 if (frag == last_frag) 1117 break; 1118 } 1119 1120 /* Make descriptor updates visible to hardware */ 1121 wmb(); 1122 1123 __skb_pull(skb, NET_IP_ALIGN); 1124 skb->protocol = eth_type_trans(skb, bp->dev); 1125 1126 bp->dev->stats.rx_packets++; 1127 bp->dev->stats.rx_bytes += skb->len; 1128 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1129 skb->len, skb->csum); 1130 netif_receive_skb(skb); 1131 1132 return 0; 1133 } 1134 1135 static inline void macb_init_rx_ring(struct macb_queue *queue) 1136 { 1137 struct macb *bp = queue->bp; 1138 dma_addr_t addr; 1139 struct macb_dma_desc *desc = NULL; 1140 int i; 1141 1142 addr = queue->rx_buffers_dma; 1143 for (i = 0; i < bp->rx_ring_size; i++) { 1144 desc = macb_rx_desc(queue, i); 1145 macb_set_addr(bp, desc, addr); 1146 desc->ctrl = 0; 1147 addr += bp->rx_buffer_size; 1148 } 1149 desc->addr |= MACB_BIT(RX_WRAP); 1150 queue->rx_tail = 0; 1151 } 1152 1153 static int macb_rx(struct macb_queue *queue, int budget) 1154 { 1155 struct macb *bp = queue->bp; 1156 bool reset_rx_queue = false; 1157 int received = 0; 1158 unsigned int tail; 1159 int first_frag = -1; 1160 1161 for (tail = queue->rx_tail; budget > 0; tail++) { 1162 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1163 u32 ctrl; 1164 1165 /* Make hw descriptor updates visible to CPU */ 1166 rmb(); 1167 1168 ctrl = desc->ctrl; 1169 1170 if (!(desc->addr & MACB_BIT(RX_USED))) 1171 break; 1172 1173 if (ctrl & MACB_BIT(RX_SOF)) { 1174 if (first_frag != -1) 1175 discard_partial_frame(queue, first_frag, tail); 1176 first_frag = tail; 1177 } 1178 1179 if (ctrl & MACB_BIT(RX_EOF)) { 1180 int dropped; 1181 1182 if (unlikely(first_frag == -1)) { 1183 reset_rx_queue = true; 1184 continue; 1185 } 1186 1187 dropped = macb_rx_frame(queue, first_frag, tail); 1188 first_frag = -1; 1189 if (unlikely(dropped < 0)) { 1190 reset_rx_queue = true; 1191 continue; 1192 } 1193 if (!dropped) { 1194 received++; 1195 budget--; 1196 } 1197 } 1198 } 1199 1200 if (unlikely(reset_rx_queue)) { 1201 unsigned long flags; 1202 u32 ctrl; 1203 1204 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1205 1206 spin_lock_irqsave(&bp->lock, flags); 1207 1208 ctrl = macb_readl(bp, NCR); 1209 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1210 1211 macb_init_rx_ring(queue); 1212 queue_writel(queue, RBQP, queue->rx_ring_dma); 1213 1214 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1215 1216 spin_unlock_irqrestore(&bp->lock, flags); 1217 return received; 1218 } 1219 1220 if (first_frag != -1) 1221 queue->rx_tail = first_frag; 1222 else 1223 queue->rx_tail = 
tail; 1224 1225 return received; 1226 } 1227 1228 static int macb_poll(struct napi_struct *napi, int budget) 1229 { 1230 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1231 struct macb *bp = queue->bp; 1232 int work_done; 1233 u32 status; 1234 1235 status = macb_readl(bp, RSR); 1236 macb_writel(bp, RSR, status); 1237 1238 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1239 (unsigned long)status, budget); 1240 1241 work_done = bp->macbgem_ops.mog_rx(queue, budget); 1242 if (work_done < budget) { 1243 napi_complete_done(napi, work_done); 1244 1245 /* Packets received while interrupts were disabled */ 1246 status = macb_readl(bp, RSR); 1247 if (status) { 1248 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1249 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1250 napi_reschedule(napi); 1251 } else { 1252 queue_writel(queue, IER, MACB_RX_INT_FLAGS); 1253 } 1254 } 1255 1256 /* TODO: Handle errors */ 1257 1258 return work_done; 1259 } 1260 1261 static void macb_hresp_error_task(unsigned long data) 1262 { 1263 struct macb *bp = (struct macb *)data; 1264 struct net_device *dev = bp->dev; 1265 struct macb_queue *queue = bp->queues; 1266 unsigned int q; 1267 u32 ctrl; 1268 1269 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1270 queue_writel(queue, IDR, MACB_RX_INT_FLAGS | 1271 MACB_TX_INT_FLAGS | 1272 MACB_BIT(HRESP)); 1273 } 1274 ctrl = macb_readl(bp, NCR); 1275 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1276 macb_writel(bp, NCR, ctrl); 1277 1278 netif_tx_stop_all_queues(dev); 1279 netif_carrier_off(dev); 1280 1281 bp->macbgem_ops.mog_init_rings(bp); 1282 1283 /* Initialize TX and RX buffers */ 1284 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1285 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 1286 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1287 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1288 queue_writel(queue, RBQPH, 1289 upper_32_bits(queue->rx_ring_dma)); 1290 #endif 1291 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 1292 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1293 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1294 queue_writel(queue, TBQPH, 1295 upper_32_bits(queue->tx_ring_dma)); 1296 #endif 1297 1298 /* Enable interrupts */ 1299 queue_writel(queue, IER, 1300 MACB_RX_INT_FLAGS | 1301 MACB_TX_INT_FLAGS | 1302 MACB_BIT(HRESP)); 1303 } 1304 1305 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1306 macb_writel(bp, NCR, ctrl); 1307 1308 netif_carrier_on(dev); 1309 netif_tx_start_all_queues(dev); 1310 } 1311 1312 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1313 { 1314 struct macb_queue *queue = dev_id; 1315 struct macb *bp = queue->bp; 1316 struct net_device *dev = bp->dev; 1317 u32 status, ctrl; 1318 1319 status = queue_readl(queue, ISR); 1320 1321 if (unlikely(!status)) 1322 return IRQ_NONE; 1323 1324 spin_lock(&bp->lock); 1325 1326 while (status) { 1327 /* close possible race with dev_close */ 1328 if (unlikely(!netif_running(dev))) { 1329 queue_writel(queue, IDR, -1); 1330 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1331 queue_writel(queue, ISR, -1); 1332 break; 1333 } 1334 1335 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1336 (unsigned int)(queue - bp->queues), 1337 (unsigned long)status); 1338 1339 if (status & MACB_RX_INT_FLAGS) { 1340 /* There's no point taking any more interrupts 1341 * until we have processed the buffers. The 1342 * scheduling call may fail if the poll routine 1343 * is already scheduled, so disable interrupts 1344 * now. 
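 * RX interrupts are re-enabled from macb_poll() once the ring has been
 * processed and no completed frames are left pending.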
1345 */ 1346 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1347 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1348 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1349 1350 if (napi_schedule_prep(&queue->napi)) { 1351 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1352 __napi_schedule(&queue->napi); 1353 } 1354 } 1355 1356 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1357 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1358 schedule_work(&queue->tx_error_task); 1359 1360 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1361 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1362 1363 break; 1364 } 1365 1366 if (status & MACB_BIT(TCOMP)) 1367 macb_tx_interrupt(queue); 1368 1369 /* Link change detection isn't possible with RMII, so we'll 1370 * add that if/when we get our hands on a full-blown MII PHY. 1371 */ 1372 1373 /* There is a hardware issue under heavy load where DMA can 1374 * stop, this causes endless "used buffer descriptor read" 1375 * interrupts but it can be cleared by re-enabling RX. See 1376 * the at91 manual, section 41.3.1 or the Zynq manual 1377 * section 16.7.4 for details. 1378 */ 1379 if (status & MACB_BIT(RXUBR)) { 1380 ctrl = macb_readl(bp, NCR); 1381 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1382 wmb(); 1383 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1384 1385 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1386 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1387 } 1388 1389 if (status & MACB_BIT(ISR_ROVR)) { 1390 /* We missed at least one packet */ 1391 if (macb_is_gem(bp)) 1392 bp->hw_stats.gem.rx_overruns++; 1393 else 1394 bp->hw_stats.macb.rx_overruns++; 1395 1396 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1397 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1398 } 1399 1400 if (status & MACB_BIT(HRESP)) { 1401 tasklet_schedule(&bp->hresp_err_tasklet); 1402 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1403 1404 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1405 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1406 } 1407 status = queue_readl(queue, ISR); 1408 } 1409 1410 spin_unlock(&bp->lock); 1411 1412 return IRQ_HANDLED; 1413 } 1414 1415 #ifdef CONFIG_NET_POLL_CONTROLLER 1416 /* Polling receive - used by netconsole and other diagnostic tools 1417 * to allow network i/o with interrupts disabled. 
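 * It simply runs the normal interrupt handler for every queue with
 * local interrupts disabled.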
1418 */ 1419 static void macb_poll_controller(struct net_device *dev) 1420 { 1421 struct macb *bp = netdev_priv(dev); 1422 struct macb_queue *queue; 1423 unsigned long flags; 1424 unsigned int q; 1425 1426 local_irq_save(flags); 1427 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1428 macb_interrupt(dev->irq, queue); 1429 local_irq_restore(flags); 1430 } 1431 #endif 1432 1433 static unsigned int macb_tx_map(struct macb *bp, 1434 struct macb_queue *queue, 1435 struct sk_buff *skb, 1436 unsigned int hdrlen) 1437 { 1438 dma_addr_t mapping; 1439 unsigned int len, entry, i, tx_head = queue->tx_head; 1440 struct macb_tx_skb *tx_skb = NULL; 1441 struct macb_dma_desc *desc; 1442 unsigned int offset, size, count = 0; 1443 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1444 unsigned int eof = 1, mss_mfs = 0; 1445 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1446 1447 /* LSO */ 1448 if (skb_shinfo(skb)->gso_size != 0) { 1449 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1450 /* UDP - UFO */ 1451 lso_ctrl = MACB_LSO_UFO_ENABLE; 1452 else 1453 /* TCP - TSO */ 1454 lso_ctrl = MACB_LSO_TSO_ENABLE; 1455 } 1456 1457 /* First, map non-paged data */ 1458 len = skb_headlen(skb); 1459 1460 /* first buffer length */ 1461 size = hdrlen; 1462 1463 offset = 0; 1464 while (len) { 1465 entry = macb_tx_ring_wrap(bp, tx_head); 1466 tx_skb = &queue->tx_skb[entry]; 1467 1468 mapping = dma_map_single(&bp->pdev->dev, 1469 skb->data + offset, 1470 size, DMA_TO_DEVICE); 1471 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1472 goto dma_error; 1473 1474 /* Save info to properly release resources */ 1475 tx_skb->skb = NULL; 1476 tx_skb->mapping = mapping; 1477 tx_skb->size = size; 1478 tx_skb->mapped_as_page = false; 1479 1480 len -= size; 1481 offset += size; 1482 count++; 1483 tx_head++; 1484 1485 size = min(len, bp->max_tx_length); 1486 } 1487 1488 /* Then, map paged data from fragments */ 1489 for (f = 0; f < nr_frags; f++) { 1490 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1491 1492 len = skb_frag_size(frag); 1493 offset = 0; 1494 while (len) { 1495 size = min(len, bp->max_tx_length); 1496 entry = macb_tx_ring_wrap(bp, tx_head); 1497 tx_skb = &queue->tx_skb[entry]; 1498 1499 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1500 offset, size, DMA_TO_DEVICE); 1501 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1502 goto dma_error; 1503 1504 /* Save info to properly release resources */ 1505 tx_skb->skb = NULL; 1506 tx_skb->mapping = mapping; 1507 tx_skb->size = size; 1508 tx_skb->mapped_as_page = true; 1509 1510 len -= size; 1511 offset += size; 1512 count++; 1513 tx_head++; 1514 } 1515 } 1516 1517 /* Should never happen */ 1518 if (unlikely(!tx_skb)) { 1519 netdev_err(bp->dev, "BUG! 
empty skb!\n"); 1520 return 0; 1521 } 1522 1523 /* This is the last buffer of the frame: save socket buffer */ 1524 tx_skb->skb = skb; 1525 1526 /* Update TX ring: update buffer descriptors in reverse order 1527 * to avoid race condition 1528 */ 1529 1530 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1531 * to set the end of TX queue 1532 */ 1533 i = tx_head; 1534 entry = macb_tx_ring_wrap(bp, i); 1535 ctrl = MACB_BIT(TX_USED); 1536 desc = macb_tx_desc(queue, entry); 1537 desc->ctrl = ctrl; 1538 1539 if (lso_ctrl) { 1540 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1541 /* include header and FCS in value given to h/w */ 1542 mss_mfs = skb_shinfo(skb)->gso_size + 1543 skb_transport_offset(skb) + 1544 ETH_FCS_LEN; 1545 else /* TSO */ { 1546 mss_mfs = skb_shinfo(skb)->gso_size; 1547 /* TCP Sequence Number Source Select 1548 * can be set only for TSO 1549 */ 1550 seq_ctrl = 0; 1551 } 1552 } 1553 1554 do { 1555 i--; 1556 entry = macb_tx_ring_wrap(bp, i); 1557 tx_skb = &queue->tx_skb[entry]; 1558 desc = macb_tx_desc(queue, entry); 1559 1560 ctrl = (u32)tx_skb->size; 1561 if (eof) { 1562 ctrl |= MACB_BIT(TX_LAST); 1563 eof = 0; 1564 } 1565 if (unlikely(entry == (bp->tx_ring_size - 1))) 1566 ctrl |= MACB_BIT(TX_WRAP); 1567 1568 /* First descriptor is header descriptor */ 1569 if (i == queue->tx_head) { 1570 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1571 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1572 } else 1573 /* Only set MSS/MFS on payload descriptors 1574 * (second or later descriptor) 1575 */ 1576 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1577 1578 /* Set TX buffer descriptor */ 1579 macb_set_addr(bp, desc, tx_skb->mapping); 1580 /* desc->addr must be visible to hardware before clearing 1581 * 'TX_USED' bit in desc->ctrl. 1582 */ 1583 wmb(); 1584 desc->ctrl = ctrl; 1585 } while (i != queue->tx_head); 1586 1587 queue->tx_head = tx_head; 1588 1589 return count; 1590 1591 dma_error: 1592 netdev_err(bp->dev, "TX DMA map failed\n"); 1593 1594 for (i = queue->tx_head; i != tx_head; i++) { 1595 tx_skb = macb_tx_skb(queue, i); 1596 1597 macb_tx_unmap(bp, tx_skb); 1598 } 1599 1600 return 0; 1601 } 1602 1603 static netdev_features_t macb_features_check(struct sk_buff *skb, 1604 struct net_device *dev, 1605 netdev_features_t features) 1606 { 1607 unsigned int nr_frags, f; 1608 unsigned int hdrlen; 1609 1610 /* Validate LSO compatibility */ 1611 1612 /* there is only one buffer */ 1613 if (!skb_is_nonlinear(skb)) 1614 return features; 1615 1616 /* length of header */ 1617 hdrlen = skb_transport_offset(skb); 1618 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1619 hdrlen += tcp_hdrlen(skb); 1620 1621 /* For LSO: 1622 * When software supplies two or more payload buffers all payload buffers 1623 * apart from the last must be a multiple of 8 bytes in size. 
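 * (MACB_TX_LEN_ALIGN is 8, so for example a 1448-byte payload chunk passes
 * the alignment check below, while a 1460-byte chunk would cause LSO to be
 * disabled for this skb, since 1460 % 8 == 4.)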
1624 */ 1625 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 1626 return features & ~MACB_NETIF_LSO; 1627 1628 nr_frags = skb_shinfo(skb)->nr_frags; 1629 /* No need to check last fragment */ 1630 nr_frags--; 1631 for (f = 0; f < nr_frags; f++) { 1632 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1633 1634 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 1635 return features & ~MACB_NETIF_LSO; 1636 } 1637 return features; 1638 } 1639 1640 static inline int macb_clear_csum(struct sk_buff *skb) 1641 { 1642 /* no change for packets without checksum offloading */ 1643 if (skb->ip_summed != CHECKSUM_PARTIAL) 1644 return 0; 1645 1646 /* make sure we can modify the header */ 1647 if (unlikely(skb_cow_head(skb, 0))) 1648 return -1; 1649 1650 /* initialize checksum field 1651 * This is required - at least for Zynq, which otherwise calculates 1652 * wrong UDP header checksums for UDP packets with UDP data len <=2 1653 */ 1654 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 1655 return 0; 1656 } 1657 1658 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1659 { 1660 u16 queue_index = skb_get_queue_mapping(skb); 1661 struct macb *bp = netdev_priv(dev); 1662 struct macb_queue *queue = &bp->queues[queue_index]; 1663 unsigned long flags; 1664 unsigned int desc_cnt, nr_frags, frag_size, f; 1665 unsigned int hdrlen; 1666 bool is_lso, is_udp = 0; 1667 1668 is_lso = (skb_shinfo(skb)->gso_size != 0); 1669 1670 if (is_lso) { 1671 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1672 1673 /* length of headers */ 1674 if (is_udp) 1675 /* only queue eth + ip headers separately for UDP */ 1676 hdrlen = skb_transport_offset(skb); 1677 else 1678 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1679 if (skb_headlen(skb) < hdrlen) { 1680 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1681 /* if this is required, would need to copy to single buffer */ 1682 return NETDEV_TX_BUSY; 1683 } 1684 } else 1685 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1686 1687 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1688 netdev_vdbg(bp->dev, 1689 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1690 queue_index, skb->len, skb->head, skb->data, 1691 skb_tail_pointer(skb), skb_end_pointer(skb)); 1692 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1693 skb->data, 16, true); 1694 #endif 1695 1696 /* Count how many TX buffer descriptors are needed to send this 1697 * socket buffer: skb fragments of jumbo frames may need to be 1698 * split into many buffer descriptors. 1699 */ 1700 if (is_lso && (skb_headlen(skb) > hdrlen)) 1701 /* extra header descriptor if also payload in first buffer */ 1702 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1703 else 1704 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1705 nr_frags = skb_shinfo(skb)->nr_frags; 1706 for (f = 0; f < nr_frags; f++) { 1707 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1708 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1709 } 1710 1711 spin_lock_irqsave(&bp->lock, flags); 1712 1713 /* This is a hard error, log it. 
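 * There is not enough room in the TX ring for the descriptors this skb
 * needs, so stop the subqueue and return NETDEV_TX_BUSY so the stack
 * retries later.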
*/ 1714 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1715 bp->tx_ring_size) < desc_cnt) { 1716 netif_stop_subqueue(dev, queue_index); 1717 spin_unlock_irqrestore(&bp->lock, flags); 1718 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1719 queue->tx_head, queue->tx_tail); 1720 return NETDEV_TX_BUSY; 1721 } 1722 1723 if (macb_clear_csum(skb)) { 1724 dev_kfree_skb_any(skb); 1725 goto unlock; 1726 } 1727 1728 /* Map socket buffer for DMA transfer */ 1729 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 1730 dev_kfree_skb_any(skb); 1731 goto unlock; 1732 } 1733 1734 /* Make newly initialized descriptor visible to hardware */ 1735 wmb(); 1736 skb_tx_timestamp(skb); 1737 1738 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1739 1740 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 1741 netif_stop_subqueue(dev, queue_index); 1742 1743 unlock: 1744 spin_unlock_irqrestore(&bp->lock, flags); 1745 1746 return NETDEV_TX_OK; 1747 } 1748 1749 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 1750 { 1751 if (!macb_is_gem(bp)) { 1752 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 1753 } else { 1754 bp->rx_buffer_size = size; 1755 1756 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 1757 netdev_dbg(bp->dev, 1758 "RX buffer must be multiple of %d bytes, expanding\n", 1759 RX_BUFFER_MULTIPLE); 1760 bp->rx_buffer_size = 1761 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 1762 } 1763 } 1764 1765 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 1766 bp->dev->mtu, bp->rx_buffer_size); 1767 } 1768 1769 static void gem_free_rx_buffers(struct macb *bp) 1770 { 1771 struct sk_buff *skb; 1772 struct macb_dma_desc *desc; 1773 struct macb_queue *queue; 1774 dma_addr_t addr; 1775 unsigned int q; 1776 int i; 1777 1778 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1779 if (!queue->rx_skbuff) 1780 continue; 1781 1782 for (i = 0; i < bp->rx_ring_size; i++) { 1783 skb = queue->rx_skbuff[i]; 1784 1785 if (!skb) 1786 continue; 1787 1788 desc = macb_rx_desc(queue, i); 1789 addr = macb_get_addr(bp, desc); 1790 1791 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1792 DMA_FROM_DEVICE); 1793 dev_kfree_skb_any(skb); 1794 skb = NULL; 1795 } 1796 1797 kfree(queue->rx_skbuff); 1798 queue->rx_skbuff = NULL; 1799 } 1800 } 1801 1802 static void macb_free_rx_buffers(struct macb *bp) 1803 { 1804 struct macb_queue *queue = &bp->queues[0]; 1805 1806 if (queue->rx_buffers) { 1807 dma_free_coherent(&bp->pdev->dev, 1808 bp->rx_ring_size * bp->rx_buffer_size, 1809 queue->rx_buffers, queue->rx_buffers_dma); 1810 queue->rx_buffers = NULL; 1811 } 1812 } 1813 1814 static void macb_free_consistent(struct macb *bp) 1815 { 1816 struct macb_queue *queue; 1817 unsigned int q; 1818 1819 queue = &bp->queues[0]; 1820 bp->macbgem_ops.mog_free_rx_buffers(bp); 1821 if (queue->rx_ring) { 1822 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), 1823 queue->rx_ring, queue->rx_ring_dma); 1824 queue->rx_ring = NULL; 1825 } 1826 1827 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1828 kfree(queue->tx_skb); 1829 queue->tx_skb = NULL; 1830 if (queue->tx_ring) { 1831 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), 1832 queue->tx_ring, queue->tx_ring_dma); 1833 queue->tx_ring = NULL; 1834 } 1835 } 1836 } 1837 1838 static int gem_alloc_rx_buffers(struct macb *bp) 1839 { 1840 struct macb_queue *queue; 1841 unsigned int q; 1842 int size; 1843 1844 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1845 size = bp->rx_ring_size * sizeof(struct 
sk_buff *); 1846 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); 1847 if (!queue->rx_skbuff) 1848 return -ENOMEM; 1849 else 1850 netdev_dbg(bp->dev, 1851 "Allocated %d RX struct sk_buff entries at %p\n", 1852 bp->rx_ring_size, queue->rx_skbuff); 1853 } 1854 return 0; 1855 } 1856 1857 static int macb_alloc_rx_buffers(struct macb *bp) 1858 { 1859 struct macb_queue *queue = &bp->queues[0]; 1860 int size; 1861 1862 size = bp->rx_ring_size * bp->rx_buffer_size; 1863 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 1864 &queue->rx_buffers_dma, GFP_KERNEL); 1865 if (!queue->rx_buffers) 1866 return -ENOMEM; 1867 1868 netdev_dbg(bp->dev, 1869 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1870 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 1871 return 0; 1872 } 1873 1874 static int macb_alloc_consistent(struct macb *bp) 1875 { 1876 struct macb_queue *queue; 1877 unsigned int q; 1878 int size; 1879 1880 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1881 size = TX_RING_BYTES(bp); 1882 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1883 &queue->tx_ring_dma, 1884 GFP_KERNEL); 1885 if (!queue->tx_ring) 1886 goto out_err; 1887 netdev_dbg(bp->dev, 1888 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 1889 q, size, (unsigned long)queue->tx_ring_dma, 1890 queue->tx_ring); 1891 1892 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 1893 queue->tx_skb = kmalloc(size, GFP_KERNEL); 1894 if (!queue->tx_skb) 1895 goto out_err; 1896 1897 size = RX_RING_BYTES(bp); 1898 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1899 &queue->rx_ring_dma, GFP_KERNEL); 1900 if (!queue->rx_ring) 1901 goto out_err; 1902 netdev_dbg(bp->dev, 1903 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1904 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 1905 } 1906 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1907 goto out_err; 1908 1909 return 0; 1910 1911 out_err: 1912 macb_free_consistent(bp); 1913 return -ENOMEM; 1914 } 1915 1916 static void gem_init_rings(struct macb *bp) 1917 { 1918 struct macb_queue *queue; 1919 struct macb_dma_desc *desc = NULL; 1920 unsigned int q; 1921 int i; 1922 1923 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1924 for (i = 0; i < bp->tx_ring_size; i++) { 1925 desc = macb_tx_desc(queue, i); 1926 macb_set_addr(bp, desc, 0); 1927 desc->ctrl = MACB_BIT(TX_USED); 1928 } 1929 desc->ctrl |= MACB_BIT(TX_WRAP); 1930 queue->tx_head = 0; 1931 queue->tx_tail = 0; 1932 1933 queue->rx_tail = 0; 1934 queue->rx_prepared_head = 0; 1935 1936 gem_rx_refill(queue); 1937 } 1938 1939 } 1940 1941 static void macb_init_rings(struct macb *bp) 1942 { 1943 int i; 1944 struct macb_dma_desc *desc = NULL; 1945 1946 macb_init_rx_ring(&bp->queues[0]); 1947 1948 for (i = 0; i < bp->tx_ring_size; i++) { 1949 desc = macb_tx_desc(&bp->queues[0], i); 1950 macb_set_addr(bp, desc, 0); 1951 desc->ctrl = MACB_BIT(TX_USED); 1952 } 1953 bp->queues[0].tx_head = 0; 1954 bp->queues[0].tx_tail = 0; 1955 desc->ctrl |= MACB_BIT(TX_WRAP); 1956 } 1957 1958 static void macb_reset_hw(struct macb *bp) 1959 { 1960 struct macb_queue *queue; 1961 unsigned int q; 1962 1963 /* Disable RX and TX (XXX: Should we halt the transmission 1964 * more gracefully?) 1965 */ 1966 macb_writel(bp, NCR, 0); 1967 1968 /* Clear the stats registers (XXX: Update stats first?) 
*/ 1969 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 1970 1971 /* Clear all status flags */ 1972 macb_writel(bp, TSR, -1); 1973 macb_writel(bp, RSR, -1); 1974 1975 /* Disable all interrupts */ 1976 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1977 queue_writel(queue, IDR, -1); 1978 queue_readl(queue, ISR); 1979 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1980 queue_writel(queue, ISR, -1); 1981 } 1982 } 1983 1984 static u32 gem_mdc_clk_div(struct macb *bp) 1985 { 1986 u32 config; 1987 unsigned long pclk_hz = clk_get_rate(bp->pclk); 1988 1989 if (pclk_hz <= 20000000) 1990 config = GEM_BF(CLK, GEM_CLK_DIV8); 1991 else if (pclk_hz <= 40000000) 1992 config = GEM_BF(CLK, GEM_CLK_DIV16); 1993 else if (pclk_hz <= 80000000) 1994 config = GEM_BF(CLK, GEM_CLK_DIV32); 1995 else if (pclk_hz <= 120000000) 1996 config = GEM_BF(CLK, GEM_CLK_DIV48); 1997 else if (pclk_hz <= 160000000) 1998 config = GEM_BF(CLK, GEM_CLK_DIV64); 1999 else 2000 config = GEM_BF(CLK, GEM_CLK_DIV96); 2001 2002 return config; 2003 } 2004 2005 static u32 macb_mdc_clk_div(struct macb *bp) 2006 { 2007 u32 config; 2008 unsigned long pclk_hz; 2009 2010 if (macb_is_gem(bp)) 2011 return gem_mdc_clk_div(bp); 2012 2013 pclk_hz = clk_get_rate(bp->pclk); 2014 if (pclk_hz <= 20000000) 2015 config = MACB_BF(CLK, MACB_CLK_DIV8); 2016 else if (pclk_hz <= 40000000) 2017 config = MACB_BF(CLK, MACB_CLK_DIV16); 2018 else if (pclk_hz <= 80000000) 2019 config = MACB_BF(CLK, MACB_CLK_DIV32); 2020 else 2021 config = MACB_BF(CLK, MACB_CLK_DIV64); 2022 2023 return config; 2024 } 2025 2026 /* Get the DMA bus width field of the network configuration register that we 2027 * should program. We find the width from decoding the design configuration 2028 * register to find the maximum supported data bus width. 2029 */ 2030 static u32 macb_dbw(struct macb *bp) 2031 { 2032 if (!macb_is_gem(bp)) 2033 return 0; 2034 2035 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2036 case 4: 2037 return GEM_BF(DBW, GEM_DBW128); 2038 case 2: 2039 return GEM_BF(DBW, GEM_DBW64); 2040 case 1: 2041 default: 2042 return GEM_BF(DBW, GEM_DBW32); 2043 } 2044 } 2045 2046 /* Configure the receive DMA engine 2047 * - use the correct receive buffer size 2048 * - set best burst length for DMA operations 2049 * (if not supported by FIFO, it will fallback to default) 2050 * - set both rx/tx packet buffers to full memory size 2051 * These are configurable parameters for GEM. 
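 * The RX buffer size is programmed in units of RX_BUFFER_MULTIPLE (64
 * bytes): e.g. a 1536-byte RX buffer is written as 1536 / 64 = 24 into
 * the RXBS field of DMACFG (queue 0) or the per-queue RBQS register.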
2052 */ 2053 static void macb_configure_dma(struct macb *bp) 2054 { 2055 struct macb_queue *queue; 2056 u32 buffer_size; 2057 unsigned int q; 2058 u32 dmacfg; 2059 2060 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2061 if (macb_is_gem(bp)) { 2062 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2063 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2064 if (q) 2065 queue_writel(queue, RBQS, buffer_size); 2066 else 2067 dmacfg |= GEM_BF(RXBS, buffer_size); 2068 } 2069 if (bp->dma_burst_length) 2070 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2071 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2072 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2073 2074 if (bp->native_io) 2075 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2076 else 2077 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2078 2079 if (bp->dev->features & NETIF_F_HW_CSUM) 2080 dmacfg |= GEM_BIT(TXCOEN); 2081 else 2082 dmacfg &= ~GEM_BIT(TXCOEN); 2083 2084 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2085 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2086 dmacfg |= GEM_BIT(ADDR64); 2087 #endif 2088 #ifdef CONFIG_MACB_USE_HWSTAMP 2089 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2090 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2091 #endif 2092 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2093 dmacfg); 2094 gem_writel(bp, DMACFG, dmacfg); 2095 } 2096 } 2097 2098 static void macb_init_hw(struct macb *bp) 2099 { 2100 struct macb_queue *queue; 2101 unsigned int q; 2102 2103 u32 config; 2104 2105 macb_reset_hw(bp); 2106 macb_set_hwaddr(bp); 2107 2108 config = macb_mdc_clk_div(bp); 2109 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2110 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2111 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2112 config |= MACB_BIT(PAE); /* PAuse Enable */ 2113 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2114 if (bp->caps & MACB_CAPS_JUMBO) 2115 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2116 else 2117 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2118 if (bp->dev->flags & IFF_PROMISC) 2119 config |= MACB_BIT(CAF); /* Copy All Frames */ 2120 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2121 config |= GEM_BIT(RXCOEN); 2122 if (!(bp->dev->flags & IFF_BROADCAST)) 2123 config |= MACB_BIT(NBC); /* No BroadCast */ 2124 config |= macb_dbw(bp); 2125 macb_writel(bp, NCFGR, config); 2126 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2127 gem_writel(bp, JML, bp->jumbo_max_len); 2128 bp->speed = SPEED_10; 2129 bp->duplex = DUPLEX_HALF; 2130 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2131 if (bp->caps & MACB_CAPS_JUMBO) 2132 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2133 2134 macb_configure_dma(bp); 2135 2136 /* Initialize TX and RX buffers */ 2137 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2138 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 2139 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2140 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2141 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); 2142 #endif 2143 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 2144 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2145 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2146 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 2147 #endif 2148 2149 /* Enable interrupts */ 2150 queue_writel(queue, IER, 2151 MACB_RX_INT_FLAGS | 2152 MACB_TX_INT_FLAGS | 2153 MACB_BIT(HRESP)); 2154 } 2155 2156 /* Enable TX and RX */ 2157 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 2158 } 2159 2160 /* The 
hash address register is 64 bits long and takes up two 2161 * locations in the memory map. The least significant bits are stored 2162 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2163 * 2164 * The unicast hash enable and the multicast hash enable bits in the 2165 * network configuration register enable the reception of hash matched 2166 * frames. The destination address is reduced to a 6 bit index into 2167 * the 64 bit hash register using the following hash function. The 2168 * hash function is an exclusive or of every sixth bit of the 2169 * destination address. 2170 * 2171 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2172 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2173 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2174 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2175 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2176 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2177 * 2178 * da[0] represents the least significant bit of the first byte 2179 * received, that is, the multicast/unicast indicator, and da[47] 2180 * represents the most significant bit of the last byte received. If 2181 * the hash index, hi[n], points to a bit that is set in the hash 2182 * register then the frame will be matched according to whether the 2183 * frame is multicast or unicast. A multicast match will be signalled 2184 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2185 * index points to a bit set in the hash register. A unicast match 2186 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2187 * and the hash index points to a bit set in the hash register. To 2188 * receive all multicast frames, the hash register should be set with 2189 * all ones and the multicast hash enable bit should be set in the 2190 * network configuration register. 2191 */ 2192 2193 static inline int hash_bit_value(int bitnr, __u8 *addr) 2194 { 2195 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2196 return 1; 2197 return 0; 2198 } 2199 2200 /* Return the hash index value for the specified address. */ 2201 static int hash_get_index(__u8 *addr) 2202 { 2203 int i, j, bitval; 2204 int hash_index = 0; 2205 2206 for (j = 0; j < 6; j++) { 2207 for (i = 0, bitval = 0; i < 8; i++) 2208 bitval ^= hash_bit_value(i * 6 + j, addr); 2209 2210 hash_index |= (bitval << j); 2211 } 2212 2213 return hash_index; 2214 } 2215 2216 /* Add multicast addresses to the internal multicast-hash table. */ 2217 static void macb_sethashtable(struct net_device *dev) 2218 { 2219 struct netdev_hw_addr *ha; 2220 unsigned long mc_filter[2]; 2221 unsigned int bitnr; 2222 struct macb *bp = netdev_priv(dev); 2223 2224 mc_filter[0] = 0; 2225 mc_filter[1] = 0; 2226 2227 netdev_for_each_mc_addr(ha, dev) { 2228 bitnr = hash_get_index(ha->addr); 2229 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2230 } 2231 2232 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2233 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2234 } 2235 2236 /* Enable/Disable promiscuous and multicast modes. 
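 * Note that enabling promiscuous mode below also turns RX checksum
 * offload off: with RXCOEN set the hardware may drop bad-checksum frames
 * that a sniffer still wants to see.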
*/ 2237 static void macb_set_rx_mode(struct net_device *dev) 2238 { 2239 unsigned long cfg; 2240 struct macb *bp = netdev_priv(dev); 2241 2242 cfg = macb_readl(bp, NCFGR); 2243 2244 if (dev->flags & IFF_PROMISC) { 2245 /* Enable promiscuous mode */ 2246 cfg |= MACB_BIT(CAF); 2247 2248 /* Disable RX checksum offload */ 2249 if (macb_is_gem(bp)) 2250 cfg &= ~GEM_BIT(RXCOEN); 2251 } else { 2252 /* Disable promiscuous mode */ 2253 cfg &= ~MACB_BIT(CAF); 2254 2255 /* Enable RX checksum offload only if requested */ 2256 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2257 cfg |= GEM_BIT(RXCOEN); 2258 } 2259 2260 if (dev->flags & IFF_ALLMULTI) { 2261 /* Enable all multicast mode */ 2262 macb_or_gem_writel(bp, HRB, -1); 2263 macb_or_gem_writel(bp, HRT, -1); 2264 cfg |= MACB_BIT(NCFGR_MTI); 2265 } else if (!netdev_mc_empty(dev)) { 2266 /* Enable specific multicasts */ 2267 macb_sethashtable(dev); 2268 cfg |= MACB_BIT(NCFGR_MTI); 2269 } else if (dev->flags & (~IFF_ALLMULTI)) { 2270 /* Disable all multicast mode */ 2271 macb_or_gem_writel(bp, HRB, 0); 2272 macb_or_gem_writel(bp, HRT, 0); 2273 cfg &= ~MACB_BIT(NCFGR_MTI); 2274 } 2275 2276 macb_writel(bp, NCFGR, cfg); 2277 } 2278 2279 static int macb_open(struct net_device *dev) 2280 { 2281 struct macb *bp = netdev_priv(dev); 2282 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2283 struct macb_queue *queue; 2284 unsigned int q; 2285 int err; 2286 2287 netdev_dbg(bp->dev, "open\n"); 2288 2289 /* carrier starts down */ 2290 netif_carrier_off(dev); 2291 2292 /* if the phy is not yet register, retry later*/ 2293 if (!dev->phydev) 2294 return -EAGAIN; 2295 2296 /* RX buffers initialization */ 2297 macb_init_rx_buffer_size(bp, bufsz); 2298 2299 err = macb_alloc_consistent(bp); 2300 if (err) { 2301 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2302 err); 2303 return err; 2304 } 2305 2306 bp->macbgem_ops.mog_init_rings(bp); 2307 macb_init_hw(bp); 2308 2309 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2310 napi_enable(&queue->napi); 2311 2312 /* schedule a link state check */ 2313 phy_start(dev->phydev); 2314 2315 netif_tx_start_all_queues(dev); 2316 2317 if (bp->ptp_info) 2318 bp->ptp_info->ptp_init(dev); 2319 2320 return 0; 2321 } 2322 2323 static int macb_close(struct net_device *dev) 2324 { 2325 struct macb *bp = netdev_priv(dev); 2326 struct macb_queue *queue; 2327 unsigned long flags; 2328 unsigned int q; 2329 2330 netif_tx_stop_all_queues(dev); 2331 2332 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2333 napi_disable(&queue->napi); 2334 2335 if (dev->phydev) 2336 phy_stop(dev->phydev); 2337 2338 spin_lock_irqsave(&bp->lock, flags); 2339 macb_reset_hw(bp); 2340 netif_carrier_off(dev); 2341 spin_unlock_irqrestore(&bp->lock, flags); 2342 2343 macb_free_consistent(bp); 2344 2345 if (bp->ptp_info) 2346 bp->ptp_info->ptp_remove(dev); 2347 2348 return 0; 2349 } 2350 2351 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2352 { 2353 if (netif_running(dev)) 2354 return -EBUSY; 2355 2356 dev->mtu = new_mtu; 2357 2358 return 0; 2359 } 2360 2361 static void gem_update_stats(struct macb *bp) 2362 { 2363 struct macb_queue *queue; 2364 unsigned int i, q, idx; 2365 unsigned long *stat; 2366 2367 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2368 2369 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2370 u32 offset = gem_statistics[i].offset; 2371 u64 val = bp->macb_reg_readl(bp, offset); 2372 2373 bp->ethtool_stats[i] += val; 2374 *p += val; 2375 2376 if (offset == GEM_OCTTXL || 
offset == GEM_OCTRXL) { 2377 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2378 val = bp->macb_reg_readl(bp, offset + 4); 2379 bp->ethtool_stats[i] += ((u64)val) << 32; 2380 *(++p) += val; 2381 } 2382 } 2383 2384 idx = GEM_STATS_LEN; 2385 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2386 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2387 bp->ethtool_stats[idx++] = *stat; 2388 } 2389 2390 static struct net_device_stats *gem_get_stats(struct macb *bp) 2391 { 2392 struct gem_stats *hwstat = &bp->hw_stats.gem; 2393 struct net_device_stats *nstat = &bp->dev->stats; 2394 2395 gem_update_stats(bp); 2396 2397 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2398 hwstat->rx_alignment_errors + 2399 hwstat->rx_resource_errors + 2400 hwstat->rx_overruns + 2401 hwstat->rx_oversize_frames + 2402 hwstat->rx_jabbers + 2403 hwstat->rx_undersized_frames + 2404 hwstat->rx_length_field_frame_errors); 2405 nstat->tx_errors = (hwstat->tx_late_collisions + 2406 hwstat->tx_excessive_collisions + 2407 hwstat->tx_underrun + 2408 hwstat->tx_carrier_sense_errors); 2409 nstat->multicast = hwstat->rx_multicast_frames; 2410 nstat->collisions = (hwstat->tx_single_collision_frames + 2411 hwstat->tx_multiple_collision_frames + 2412 hwstat->tx_excessive_collisions); 2413 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2414 hwstat->rx_jabbers + 2415 hwstat->rx_undersized_frames + 2416 hwstat->rx_length_field_frame_errors); 2417 nstat->rx_over_errors = hwstat->rx_resource_errors; 2418 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2419 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2420 nstat->rx_fifo_errors = hwstat->rx_overruns; 2421 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2422 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2423 nstat->tx_fifo_errors = hwstat->tx_underrun; 2424 2425 return nstat; 2426 } 2427 2428 static void gem_get_ethtool_stats(struct net_device *dev, 2429 struct ethtool_stats *stats, u64 *data) 2430 { 2431 struct macb *bp; 2432 2433 bp = netdev_priv(dev); 2434 gem_update_stats(bp); 2435 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2436 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2437 } 2438 2439 static int gem_get_sset_count(struct net_device *dev, int sset) 2440 { 2441 struct macb *bp = netdev_priv(dev); 2442 2443 switch (sset) { 2444 case ETH_SS_STATS: 2445 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2446 default: 2447 return -EOPNOTSUPP; 2448 } 2449 } 2450 2451 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2452 { 2453 char stat_string[ETH_GSTRING_LEN]; 2454 struct macb *bp = netdev_priv(dev); 2455 struct macb_queue *queue; 2456 unsigned int i; 2457 unsigned int q; 2458 2459 switch (sset) { 2460 case ETH_SS_STATS: 2461 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2462 memcpy(p, gem_statistics[i].stat_string, 2463 ETH_GSTRING_LEN); 2464 2465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2466 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2467 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2468 q, queue_statistics[i].stat_string); 2469 memcpy(p, stat_string, ETH_GSTRING_LEN); 2470 } 2471 } 2472 break; 2473 } 2474 } 2475 2476 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2477 { 2478 struct macb *bp = netdev_priv(dev); 2479 struct net_device_stats *nstat = &bp->dev->stats; 2480 struct macb_stats *hwstat = &bp->hw_stats.macb; 2481 2482 if (macb_is_gem(bp)) 2483 
return gem_get_stats(bp); 2484 2485 /* read stats from hardware */ 2486 macb_update_stats(bp); 2487 2488 /* Convert HW stats into netdevice stats */ 2489 nstat->rx_errors = (hwstat->rx_fcs_errors + 2490 hwstat->rx_align_errors + 2491 hwstat->rx_resource_errors + 2492 hwstat->rx_overruns + 2493 hwstat->rx_oversize_pkts + 2494 hwstat->rx_jabbers + 2495 hwstat->rx_undersize_pkts + 2496 hwstat->rx_length_mismatch); 2497 nstat->tx_errors = (hwstat->tx_late_cols + 2498 hwstat->tx_excessive_cols + 2499 hwstat->tx_underruns + 2500 hwstat->tx_carrier_errors + 2501 hwstat->sqe_test_errors); 2502 nstat->collisions = (hwstat->tx_single_cols + 2503 hwstat->tx_multiple_cols + 2504 hwstat->tx_excessive_cols); 2505 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2506 hwstat->rx_jabbers + 2507 hwstat->rx_undersize_pkts + 2508 hwstat->rx_length_mismatch); 2509 nstat->rx_over_errors = hwstat->rx_resource_errors + 2510 hwstat->rx_overruns; 2511 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2512 nstat->rx_frame_errors = hwstat->rx_align_errors; 2513 nstat->rx_fifo_errors = hwstat->rx_overruns; 2514 /* XXX: What does "missed" mean? */ 2515 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2516 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2517 nstat->tx_fifo_errors = hwstat->tx_underruns; 2518 /* Don't know about heartbeat or window errors... */ 2519 2520 return nstat; 2521 } 2522 2523 static int macb_get_regs_len(struct net_device *netdev) 2524 { 2525 return MACB_GREGS_NBR * sizeof(u32); 2526 } 2527 2528 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2529 void *p) 2530 { 2531 struct macb *bp = netdev_priv(dev); 2532 unsigned int tail, head; 2533 u32 *regs_buff = p; 2534 2535 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2536 | MACB_GREGS_VERSION; 2537 2538 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2539 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2540 2541 regs_buff[0] = macb_readl(bp, NCR); 2542 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2543 regs_buff[2] = macb_readl(bp, NSR); 2544 regs_buff[3] = macb_readl(bp, TSR); 2545 regs_buff[4] = macb_readl(bp, RBQP); 2546 regs_buff[5] = macb_readl(bp, TBQP); 2547 regs_buff[6] = macb_readl(bp, RSR); 2548 regs_buff[7] = macb_readl(bp, IMR); 2549 2550 regs_buff[8] = tail; 2551 regs_buff[9] = head; 2552 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2553 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2554 2555 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2556 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2557 if (macb_is_gem(bp)) 2558 regs_buff[13] = gem_readl(bp, DMACFG); 2559 } 2560 2561 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2562 { 2563 struct macb *bp = netdev_priv(netdev); 2564 2565 wol->supported = 0; 2566 wol->wolopts = 0; 2567 2568 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2569 wol->supported = WAKE_MAGIC; 2570 2571 if (bp->wol & MACB_WOL_ENABLED) 2572 wol->wolopts |= WAKE_MAGIC; 2573 } 2574 } 2575 2576 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2577 { 2578 struct macb *bp = netdev_priv(netdev); 2579 2580 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2581 (wol->wolopts & ~WAKE_MAGIC)) 2582 return -EOPNOTSUPP; 2583 2584 if (wol->wolopts & WAKE_MAGIC) 2585 bp->wol |= MACB_WOL_ENABLED; 2586 else 2587 bp->wol &= ~MACB_WOL_ENABLED; 2588 2589 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2590 2591 return 0; 2592 } 2593 2594 static void macb_get_ringparam(struct net_device 
*netdev, 2595 struct ethtool_ringparam *ring) 2596 { 2597 struct macb *bp = netdev_priv(netdev); 2598 2599 ring->rx_max_pending = MAX_RX_RING_SIZE; 2600 ring->tx_max_pending = MAX_TX_RING_SIZE; 2601 2602 ring->rx_pending = bp->rx_ring_size; 2603 ring->tx_pending = bp->tx_ring_size; 2604 } 2605 2606 static int macb_set_ringparam(struct net_device *netdev, 2607 struct ethtool_ringparam *ring) 2608 { 2609 struct macb *bp = netdev_priv(netdev); 2610 u32 new_rx_size, new_tx_size; 2611 unsigned int reset = 0; 2612 2613 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2614 return -EINVAL; 2615 2616 new_rx_size = clamp_t(u32, ring->rx_pending, 2617 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2618 new_rx_size = roundup_pow_of_two(new_rx_size); 2619 2620 new_tx_size = clamp_t(u32, ring->tx_pending, 2621 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2622 new_tx_size = roundup_pow_of_two(new_tx_size); 2623 2624 if ((new_tx_size == bp->tx_ring_size) && 2625 (new_rx_size == bp->rx_ring_size)) { 2626 /* nothing to do */ 2627 return 0; 2628 } 2629 2630 if (netif_running(bp->dev)) { 2631 reset = 1; 2632 macb_close(bp->dev); 2633 } 2634 2635 bp->rx_ring_size = new_rx_size; 2636 bp->tx_ring_size = new_tx_size; 2637 2638 if (reset) 2639 macb_open(bp->dev); 2640 2641 return 0; 2642 } 2643 2644 #ifdef CONFIG_MACB_USE_HWSTAMP 2645 static unsigned int gem_get_tsu_rate(struct macb *bp) 2646 { 2647 struct clk *tsu_clk; 2648 unsigned int tsu_rate; 2649 2650 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2651 if (!IS_ERR(tsu_clk)) 2652 tsu_rate = clk_get_rate(tsu_clk); 2653 /* try pclk instead */ 2654 else if (!IS_ERR(bp->pclk)) { 2655 tsu_clk = bp->pclk; 2656 tsu_rate = clk_get_rate(tsu_clk); 2657 } else 2658 return -ENOTSUPP; 2659 return tsu_rate; 2660 } 2661 2662 static s32 gem_get_ptp_max_adj(void) 2663 { 2664 return 64000000; 2665 } 2666 2667 static int gem_get_ts_info(struct net_device *dev, 2668 struct ethtool_ts_info *info) 2669 { 2670 struct macb *bp = netdev_priv(dev); 2671 2672 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2673 ethtool_op_get_ts_info(dev, info); 2674 return 0; 2675 } 2676 2677 info->so_timestamping = 2678 SOF_TIMESTAMPING_TX_SOFTWARE | 2679 SOF_TIMESTAMPING_RX_SOFTWARE | 2680 SOF_TIMESTAMPING_SOFTWARE | 2681 SOF_TIMESTAMPING_TX_HARDWARE | 2682 SOF_TIMESTAMPING_RX_HARDWARE | 2683 SOF_TIMESTAMPING_RAW_HARDWARE; 2684 info->tx_types = 2685 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2686 (1 << HWTSTAMP_TX_OFF) | 2687 (1 << HWTSTAMP_TX_ON); 2688 info->rx_filters = 2689 (1 << HWTSTAMP_FILTER_NONE) | 2690 (1 << HWTSTAMP_FILTER_ALL); 2691 2692 info->phc_index = bp->ptp_clock ? 
ptp_clock_index(bp->ptp_clock) : -1; 2693 2694 return 0; 2695 } 2696 2697 static struct macb_ptp_info gem_ptp_info = { 2698 .ptp_init = gem_ptp_init, 2699 .ptp_remove = gem_ptp_remove, 2700 .get_ptp_max_adj = gem_get_ptp_max_adj, 2701 .get_tsu_rate = gem_get_tsu_rate, 2702 .get_ts_info = gem_get_ts_info, 2703 .get_hwtst = gem_get_hwtst, 2704 .set_hwtst = gem_set_hwtst, 2705 }; 2706 #endif 2707 2708 static int macb_get_ts_info(struct net_device *netdev, 2709 struct ethtool_ts_info *info) 2710 { 2711 struct macb *bp = netdev_priv(netdev); 2712 2713 if (bp->ptp_info) 2714 return bp->ptp_info->get_ts_info(netdev, info); 2715 2716 return ethtool_op_get_ts_info(netdev, info); 2717 } 2718 2719 static void gem_enable_flow_filters(struct macb *bp, bool enable) 2720 { 2721 struct ethtool_rx_fs_item *item; 2722 u32 t2_scr; 2723 int num_t2_scr; 2724 2725 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 2726 2727 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2728 struct ethtool_rx_flow_spec *fs = &item->fs; 2729 struct ethtool_tcpip4_spec *tp4sp_m; 2730 2731 if (fs->location >= num_t2_scr) 2732 continue; 2733 2734 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 2735 2736 /* enable/disable screener regs for the flow entry */ 2737 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 2738 2739 /* only enable fields with no masking */ 2740 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2741 2742 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 2743 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 2744 else 2745 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 2746 2747 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 2748 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 2749 else 2750 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 2751 2752 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 2753 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 2754 else 2755 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 2756 2757 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 2758 } 2759 } 2760 2761 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 2762 { 2763 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 2764 uint16_t index = fs->location; 2765 u32 w0, w1, t2_scr; 2766 bool cmp_a = false; 2767 bool cmp_b = false; 2768 bool cmp_c = false; 2769 2770 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 2771 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2772 2773 /* ignore field if any masking set */ 2774 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 2775 /* 1st compare reg - IP source address */ 2776 w0 = 0; 2777 w1 = 0; 2778 w0 = tp4sp_v->ip4src; 2779 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2780 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2781 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 2782 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 2783 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 2784 cmp_a = true; 2785 } 2786 2787 /* ignore field if any masking set */ 2788 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 2789 /* 2nd compare reg - IP destination address */ 2790 w0 = 0; 2791 w1 = 0; 2792 w0 = tp4sp_v->ip4dst; 2793 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2794 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2795 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 2796 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 2797 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 2798 cmp_b = true; 2799 } 2800 2801 /* ignore both port fields if masking set in both */ 2802 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 2803 /* 3rd compare reg - source port, 
destination port */ 2804 w0 = 0; 2805 w1 = 0; 2806 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 2807 if (tp4sp_m->psrc == tp4sp_m->pdst) { 2808 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 2809 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2810 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2811 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2812 } else { 2813 /* only one port definition */ 2814 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 2815 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 2816 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 2817 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 2818 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2819 } else { /* dst port */ 2820 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2821 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 2822 } 2823 } 2824 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 2825 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 2826 cmp_c = true; 2827 } 2828 2829 t2_scr = 0; 2830 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 2831 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 2832 if (cmp_a) 2833 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 2834 if (cmp_b) 2835 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 2836 if (cmp_c) 2837 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 2838 gem_writel_n(bp, SCRT2, index, t2_scr); 2839 } 2840 2841 static int gem_add_flow_filter(struct net_device *netdev, 2842 struct ethtool_rxnfc *cmd) 2843 { 2844 struct macb *bp = netdev_priv(netdev); 2845 struct ethtool_rx_flow_spec *fs = &cmd->fs; 2846 struct ethtool_rx_fs_item *item, *newfs; 2847 unsigned long flags; 2848 int ret = -EINVAL; 2849 bool added = false; 2850 2851 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 2852 if (newfs == NULL) 2853 return -ENOMEM; 2854 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 2855 2856 netdev_dbg(netdev, 2857 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 2858 fs->flow_type, (int)fs->ring_cookie, fs->location, 2859 htonl(fs->h_u.tcp_ip4_spec.ip4src), 2860 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 2861 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 2862 2863 spin_lock_irqsave(&bp->rx_fs_lock, flags); 2864 2865 /* find correct place to add in list */ 2866 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2867 if (item->fs.location > newfs->fs.location) { 2868 list_add_tail(&newfs->list, &item->list); 2869 added = true; 2870 break; 2871 } else if (item->fs.location == fs->location) { 2872 netdev_err(netdev, "Rule not added: location %d not free!\n", 2873 fs->location); 2874 ret = -EBUSY; 2875 goto err; 2876 } 2877 } 2878 if (!added) 2879 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 2880 2881 gem_prog_cmp_regs(bp, fs); 2882 bp->rx_fs_list.count++; 2883 /* enable filtering if NTUPLE on */ 2884 if (netdev->features & NETIF_F_NTUPLE) 2885 gem_enable_flow_filters(bp, 1); 2886 2887 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2888 return 0; 2889 2890 err: 2891 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2892 kfree(newfs); 2893 return ret; 2894 } 2895 2896 static int gem_del_flow_filter(struct net_device *netdev, 2897 struct ethtool_rxnfc *cmd) 2898 { 2899 struct macb *bp = netdev_priv(netdev); 2900 struct ethtool_rx_fs_item *item; 2901 struct ethtool_rx_flow_spec *fs; 2902 unsigned long flags; 2903 2904 spin_lock_irqsave(&bp->rx_fs_lock, flags); 2905 2906 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2907 if (item->fs.location == cmd->fs.location) { 2908 /* 
disable screener regs for the flow entry */ 2909 fs = &(item->fs); 2910 netdev_dbg(netdev, 2911 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 2912 fs->flow_type, (int)fs->ring_cookie, fs->location, 2913 htonl(fs->h_u.tcp_ip4_spec.ip4src), 2914 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 2915 htons(fs->h_u.tcp_ip4_spec.psrc), 2916 htons(fs->h_u.tcp_ip4_spec.pdst)); 2917 2918 gem_writel_n(bp, SCRT2, fs->location, 0); 2919 2920 list_del(&item->list); 2921 bp->rx_fs_list.count--; 2922 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2923 kfree(item); 2924 return 0; 2925 } 2926 } 2927 2928 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 2929 return -EINVAL; 2930 } 2931 2932 static int gem_get_flow_entry(struct net_device *netdev, 2933 struct ethtool_rxnfc *cmd) 2934 { 2935 struct macb *bp = netdev_priv(netdev); 2936 struct ethtool_rx_fs_item *item; 2937 2938 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2939 if (item->fs.location == cmd->fs.location) { 2940 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 2941 return 0; 2942 } 2943 } 2944 return -EINVAL; 2945 } 2946 2947 static int gem_get_all_flow_entries(struct net_device *netdev, 2948 struct ethtool_rxnfc *cmd, u32 *rule_locs) 2949 { 2950 struct macb *bp = netdev_priv(netdev); 2951 struct ethtool_rx_fs_item *item; 2952 uint32_t cnt = 0; 2953 2954 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2955 if (cnt == cmd->rule_cnt) 2956 return -EMSGSIZE; 2957 rule_locs[cnt] = item->fs.location; 2958 cnt++; 2959 } 2960 cmd->data = bp->max_tuples; 2961 cmd->rule_cnt = cnt; 2962 2963 return 0; 2964 } 2965 2966 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 2967 u32 *rule_locs) 2968 { 2969 struct macb *bp = netdev_priv(netdev); 2970 int ret = 0; 2971 2972 switch (cmd->cmd) { 2973 case ETHTOOL_GRXRINGS: 2974 cmd->data = bp->num_queues; 2975 break; 2976 case ETHTOOL_GRXCLSRLCNT: 2977 cmd->rule_cnt = bp->rx_fs_list.count; 2978 break; 2979 case ETHTOOL_GRXCLSRULE: 2980 ret = gem_get_flow_entry(netdev, cmd); 2981 break; 2982 case ETHTOOL_GRXCLSRLALL: 2983 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 2984 break; 2985 default: 2986 netdev_err(netdev, 2987 "Command parameter %d is not supported\n", cmd->cmd); 2988 ret = -EOPNOTSUPP; 2989 } 2990 2991 return ret; 2992 } 2993 2994 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 2995 { 2996 struct macb *bp = netdev_priv(netdev); 2997 int ret; 2998 2999 switch (cmd->cmd) { 3000 case ETHTOOL_SRXCLSRLINS: 3001 if ((cmd->fs.location >= bp->max_tuples) 3002 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3003 ret = -EINVAL; 3004 break; 3005 } 3006 ret = gem_add_flow_filter(netdev, cmd); 3007 break; 3008 case ETHTOOL_SRXCLSRLDEL: 3009 ret = gem_del_flow_filter(netdev, cmd); 3010 break; 3011 default: 3012 netdev_err(netdev, 3013 "Command parameter %d is not supported\n", cmd->cmd); 3014 ret = -EOPNOTSUPP; 3015 } 3016 3017 return ret; 3018 } 3019 3020 static const struct ethtool_ops macb_ethtool_ops = { 3021 .get_regs_len = macb_get_regs_len, 3022 .get_regs = macb_get_regs, 3023 .get_link = ethtool_op_get_link, 3024 .get_ts_info = ethtool_op_get_ts_info, 3025 .get_wol = macb_get_wol, 3026 .set_wol = macb_set_wol, 3027 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3028 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3029 .get_ringparam = macb_get_ringparam, 3030 .set_ringparam = macb_set_ringparam, 3031 }; 3032 3033 static const struct ethtool_ops gem_ethtool_ops = { 3034 .get_regs_len = 
macb_get_regs_len, 3035 .get_regs = macb_get_regs, 3036 .get_link = ethtool_op_get_link, 3037 .get_ts_info = macb_get_ts_info, 3038 .get_ethtool_stats = gem_get_ethtool_stats, 3039 .get_strings = gem_get_ethtool_strings, 3040 .get_sset_count = gem_get_sset_count, 3041 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3042 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3043 .get_ringparam = macb_get_ringparam, 3044 .set_ringparam = macb_set_ringparam, 3045 .get_rxnfc = gem_get_rxnfc, 3046 .set_rxnfc = gem_set_rxnfc, 3047 }; 3048 3049 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3050 { 3051 struct phy_device *phydev = dev->phydev; 3052 struct macb *bp = netdev_priv(dev); 3053 3054 if (!netif_running(dev)) 3055 return -EINVAL; 3056 3057 if (!phydev) 3058 return -ENODEV; 3059 3060 if (!bp->ptp_info) 3061 return phy_mii_ioctl(phydev, rq, cmd); 3062 3063 switch (cmd) { 3064 case SIOCSHWTSTAMP: 3065 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3066 case SIOCGHWTSTAMP: 3067 return bp->ptp_info->get_hwtst(dev, rq); 3068 default: 3069 return phy_mii_ioctl(phydev, rq, cmd); 3070 } 3071 } 3072 3073 static int macb_set_features(struct net_device *netdev, 3074 netdev_features_t features) 3075 { 3076 struct macb *bp = netdev_priv(netdev); 3077 netdev_features_t changed = features ^ netdev->features; 3078 3079 /* TX checksum offload */ 3080 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { 3081 u32 dmacfg; 3082 3083 dmacfg = gem_readl(bp, DMACFG); 3084 if (features & NETIF_F_HW_CSUM) 3085 dmacfg |= GEM_BIT(TXCOEN); 3086 else 3087 dmacfg &= ~GEM_BIT(TXCOEN); 3088 gem_writel(bp, DMACFG, dmacfg); 3089 } 3090 3091 /* RX checksum offload */ 3092 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { 3093 u32 netcfg; 3094 3095 netcfg = gem_readl(bp, NCFGR); 3096 if (features & NETIF_F_RXCSUM && 3097 !(netdev->flags & IFF_PROMISC)) 3098 netcfg |= GEM_BIT(RXCOEN); 3099 else 3100 netcfg &= ~GEM_BIT(RXCOEN); 3101 gem_writel(bp, NCFGR, netcfg); 3102 } 3103 3104 /* RX Flow Filters */ 3105 if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) { 3106 bool turn_on = features & NETIF_F_NTUPLE; 3107 3108 gem_enable_flow_filters(bp, turn_on); 3109 } 3110 return 0; 3111 } 3112 3113 static const struct net_device_ops macb_netdev_ops = { 3114 .ndo_open = macb_open, 3115 .ndo_stop = macb_close, 3116 .ndo_start_xmit = macb_start_xmit, 3117 .ndo_set_rx_mode = macb_set_rx_mode, 3118 .ndo_get_stats = macb_get_stats, 3119 .ndo_do_ioctl = macb_ioctl, 3120 .ndo_validate_addr = eth_validate_addr, 3121 .ndo_change_mtu = macb_change_mtu, 3122 .ndo_set_mac_address = eth_mac_addr, 3123 #ifdef CONFIG_NET_POLL_CONTROLLER 3124 .ndo_poll_controller = macb_poll_controller, 3125 #endif 3126 .ndo_set_features = macb_set_features, 3127 .ndo_features_check = macb_features_check, 3128 }; 3129 3130 /* Configure peripheral capabilities according to device tree 3131 * and integration options used 3132 */ 3133 static void macb_configure_caps(struct macb *bp, 3134 const struct macb_config *dt_conf) 3135 { 3136 u32 dcfg; 3137 3138 if (dt_conf) 3139 bp->caps = dt_conf->caps; 3140 3141 if (hw_is_gem(bp->regs, bp->native_io)) { 3142 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3143 3144 dcfg = gem_readl(bp, DCFG1); 3145 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3146 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3147 dcfg = gem_readl(bp, DCFG2); 3148 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3149 bp->caps |= MACB_CAPS_FIFO_MODE; 3150 #ifdef CONFIG_MACB_USE_HWSTAMP 3151 if (gem_has_ptp(bp)) { 3152 if (!GEM_BFEXT(TSU, 
gem_readl(bp, DCFG5))) 3153 pr_err("GEM doesn't support hardware ptp.\n"); 3154 else { 3155 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3156 bp->ptp_info = &gem_ptp_info; 3157 } 3158 } 3159 #endif 3160 } 3161 3162 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3163 } 3164 3165 static void macb_probe_queues(void __iomem *mem, 3166 bool native_io, 3167 unsigned int *queue_mask, 3168 unsigned int *num_queues) 3169 { 3170 unsigned int hw_q; 3171 3172 *queue_mask = 0x1; 3173 *num_queues = 1; 3174 3175 /* is it macb or gem ? 3176 * 3177 * We need to read directly from the hardware here because 3178 * we are early in the probe process and don't have the 3179 * MACB_CAPS_MACB_IS_GEM flag positioned 3180 */ 3181 if (!hw_is_gem(mem, native_io)) 3182 return; 3183 3184 /* bit 0 is never set but queue 0 always exists */ 3185 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 3186 3187 *queue_mask |= 0x1; 3188 3189 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 3190 if (*queue_mask & (1 << hw_q)) 3191 (*num_queues)++; 3192 } 3193 3194 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3195 struct clk **hclk, struct clk **tx_clk, 3196 struct clk **rx_clk) 3197 { 3198 struct macb_platform_data *pdata; 3199 int err; 3200 3201 pdata = dev_get_platdata(&pdev->dev); 3202 if (pdata) { 3203 *pclk = pdata->pclk; 3204 *hclk = pdata->hclk; 3205 } else { 3206 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3207 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3208 } 3209 3210 if (IS_ERR(*pclk)) { 3211 err = PTR_ERR(*pclk); 3212 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 3213 return err; 3214 } 3215 3216 if (IS_ERR(*hclk)) { 3217 err = PTR_ERR(*hclk); 3218 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 3219 return err; 3220 } 3221 3222 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 3223 if (IS_ERR(*tx_clk)) 3224 *tx_clk = NULL; 3225 3226 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); 3227 if (IS_ERR(*rx_clk)) 3228 *rx_clk = NULL; 3229 3230 err = clk_prepare_enable(*pclk); 3231 if (err) { 3232 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3233 return err; 3234 } 3235 3236 err = clk_prepare_enable(*hclk); 3237 if (err) { 3238 dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 3239 goto err_disable_pclk; 3240 } 3241 3242 err = clk_prepare_enable(*tx_clk); 3243 if (err) { 3244 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 3245 goto err_disable_hclk; 3246 } 3247 3248 err = clk_prepare_enable(*rx_clk); 3249 if (err) { 3250 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 3251 goto err_disable_txclk; 3252 } 3253 3254 return 0; 3255 3256 err_disable_txclk: 3257 clk_disable_unprepare(*tx_clk); 3258 3259 err_disable_hclk: 3260 clk_disable_unprepare(*hclk); 3261 3262 err_disable_pclk: 3263 clk_disable_unprepare(*pclk); 3264 3265 return err; 3266 } 3267 3268 static int macb_init(struct platform_device *pdev) 3269 { 3270 struct net_device *dev = platform_get_drvdata(pdev); 3271 unsigned int hw_q, q; 3272 struct macb *bp = netdev_priv(dev); 3273 struct macb_queue *queue; 3274 int err; 3275 u32 val, reg; 3276 3277 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3278 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3279 3280 /* set the queue register mapping once for all: queue0 has a special 3281 * register mapping but we don't want to test the queue index then 3282 * compute the corresponding register offset at run time. 
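 * (Concretely: queue 0 keeps the legacy MACB_ISR/IER/IDR/IMR/TBQP/RBQP
 * offsets, while queues 1..N use the banked GEM_*(hw_q - 1) copies set
 * up just below.)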
3283 */ 3284 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3285 if (!(bp->queue_mask & (1 << hw_q))) 3286 continue; 3287 3288 queue = &bp->queues[q]; 3289 queue->bp = bp; 3290 netif_napi_add(dev, &queue->napi, macb_poll, 64); 3291 if (hw_q) { 3292 queue->ISR = GEM_ISR(hw_q - 1); 3293 queue->IER = GEM_IER(hw_q - 1); 3294 queue->IDR = GEM_IDR(hw_q - 1); 3295 queue->IMR = GEM_IMR(hw_q - 1); 3296 queue->TBQP = GEM_TBQP(hw_q - 1); 3297 queue->RBQP = GEM_RBQP(hw_q - 1); 3298 queue->RBQS = GEM_RBQS(hw_q - 1); 3299 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3300 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3301 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3302 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3303 } 3304 #endif 3305 } else { 3306 /* queue0 uses legacy registers */ 3307 queue->ISR = MACB_ISR; 3308 queue->IER = MACB_IER; 3309 queue->IDR = MACB_IDR; 3310 queue->IMR = MACB_IMR; 3311 queue->TBQP = MACB_TBQP; 3312 queue->RBQP = MACB_RBQP; 3313 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3314 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3315 queue->TBQPH = MACB_TBQPH; 3316 queue->RBQPH = MACB_RBQPH; 3317 } 3318 #endif 3319 } 3320 3321 /* get irq: here we use the linux queue index, not the hardware 3322 * queue index. the queue irq definitions in the device tree 3323 * must remove the optional gaps that could exist in the 3324 * hardware queue mask. 3325 */ 3326 queue->irq = platform_get_irq(pdev, q); 3327 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3328 IRQF_SHARED, dev->name, queue); 3329 if (err) { 3330 dev_err(&pdev->dev, 3331 "Unable to request IRQ %d (error %d)\n", 3332 queue->irq, err); 3333 return err; 3334 } 3335 3336 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3337 q++; 3338 } 3339 3340 dev->netdev_ops = &macb_netdev_ops; 3341 3342 /* setup appropriated routines according to adapter type */ 3343 if (macb_is_gem(bp)) { 3344 bp->max_tx_length = GEM_MAX_TX_LEN; 3345 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3346 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3347 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3348 bp->macbgem_ops.mog_rx = gem_rx; 3349 dev->ethtool_ops = &gem_ethtool_ops; 3350 } else { 3351 bp->max_tx_length = MACB_MAX_TX_LEN; 3352 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3353 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3354 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3355 bp->macbgem_ops.mog_rx = macb_rx; 3356 dev->ethtool_ops = &macb_ethtool_ops; 3357 } 3358 3359 /* Set features */ 3360 dev->hw_features = NETIF_F_SG; 3361 3362 /* Check LSO capability */ 3363 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3364 dev->hw_features |= MACB_NETIF_LSO; 3365 3366 /* Checksum offload is only available on gem with packet buffer */ 3367 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3368 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3369 if (bp->caps & MACB_CAPS_SG_DISABLED) 3370 dev->hw_features &= ~NETIF_F_SG; 3371 dev->features = dev->hw_features; 3372 3373 /* Check RX Flow Filters support. 
3374 * Max Rx flows set by availability of screeners & compare regs: 3375 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3376 */ 3377 reg = gem_readl(bp, DCFG8); 3378 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3379 GEM_BFEXT(T2SCR, reg)); 3380 if (bp->max_tuples > 0) { 3381 /* also needs one ethtype match to check IPv4 */ 3382 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3383 /* program this reg now */ 3384 reg = 0; 3385 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3386 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3387 /* Filtering is supported in hw but don't enable it in kernel now */ 3388 dev->hw_features |= NETIF_F_NTUPLE; 3389 /* init Rx flow definitions */ 3390 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3391 bp->rx_fs_list.count = 0; 3392 spin_lock_init(&bp->rx_fs_lock); 3393 } else 3394 bp->max_tuples = 0; 3395 } 3396 3397 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3398 val = 0; 3399 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3400 val = GEM_BIT(RGMII); 3401 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3402 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3403 val = MACB_BIT(RMII); 3404 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3405 val = MACB_BIT(MII); 3406 3407 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3408 val |= MACB_BIT(CLKEN); 3409 3410 macb_or_gem_writel(bp, USRIO, val); 3411 } 3412 3413 /* Set MII management clock divider */ 3414 val = macb_mdc_clk_div(bp); 3415 val |= macb_dbw(bp); 3416 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3417 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3418 macb_writel(bp, NCFGR, val); 3419 3420 return 0; 3421 } 3422 3423 #if defined(CONFIG_OF) 3424 /* 1518 rounded up */ 3425 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3426 /* max number of receive buffers */ 3427 #define AT91ETHER_MAX_RX_DESCR 9 3428 3429 /* Initialize and start the Receiver and Transmit subsystems */ 3430 static int at91ether_start(struct net_device *dev) 3431 { 3432 struct macb *lp = netdev_priv(dev); 3433 struct macb_queue *q = &lp->queues[0]; 3434 struct macb_dma_desc *desc; 3435 dma_addr_t addr; 3436 u32 ctl; 3437 int i; 3438 3439 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3440 (AT91ETHER_MAX_RX_DESCR * 3441 macb_dma_desc_get_size(lp)), 3442 &q->rx_ring_dma, GFP_KERNEL); 3443 if (!q->rx_ring) 3444 return -ENOMEM; 3445 3446 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3447 AT91ETHER_MAX_RX_DESCR * 3448 AT91ETHER_MAX_RBUFF_SZ, 3449 &q->rx_buffers_dma, GFP_KERNEL); 3450 if (!q->rx_buffers) { 3451 dma_free_coherent(&lp->pdev->dev, 3452 AT91ETHER_MAX_RX_DESCR * 3453 macb_dma_desc_get_size(lp), 3454 q->rx_ring, q->rx_ring_dma); 3455 q->rx_ring = NULL; 3456 return -ENOMEM; 3457 } 3458 3459 addr = q->rx_buffers_dma; 3460 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3461 desc = macb_rx_desc(q, i); 3462 macb_set_addr(lp, desc, addr); 3463 desc->ctrl = 0; 3464 addr += AT91ETHER_MAX_RBUFF_SZ; 3465 } 3466 3467 /* Set the Wrap bit on the last descriptor */ 3468 desc->addr |= MACB_BIT(RX_WRAP); 3469 3470 /* Reset buffer index */ 3471 q->rx_tail = 0; 3472 3473 /* Program address of descriptor list in Rx Buffer Queue register */ 3474 macb_writel(lp, RBQP, q->rx_ring_dma); 3475 3476 /* Enable Receive and Transmit */ 3477 ctl = macb_readl(lp, NCR); 3478 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3479 3480 return 0; 3481 } 3482 3483 /* Open the ethernet interface */ 3484 static int at91ether_open(struct net_device *dev) 3485 { 3486 struct macb *lp = netdev_priv(dev); 3487 u32 ctl; 3488 int ret; 3489 3490 /* Clear 
internal statistics */ 3491 ctl = macb_readl(lp, NCR); 3492 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3493 3494 macb_set_hwaddr(lp); 3495 3496 ret = at91ether_start(dev); 3497 if (ret) 3498 return ret; 3499 3500 /* Enable MAC interrupts */ 3501 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3502 MACB_BIT(RXUBR) | 3503 MACB_BIT(ISR_TUND) | 3504 MACB_BIT(ISR_RLE) | 3505 MACB_BIT(TCOMP) | 3506 MACB_BIT(ISR_ROVR) | 3507 MACB_BIT(HRESP)); 3508 3509 /* schedule a link state check */ 3510 phy_start(dev->phydev); 3511 3512 netif_start_queue(dev); 3513 3514 return 0; 3515 } 3516 3517 /* Close the interface */ 3518 static int at91ether_close(struct net_device *dev) 3519 { 3520 struct macb *lp = netdev_priv(dev); 3521 struct macb_queue *q = &lp->queues[0]; 3522 u32 ctl; 3523 3524 /* Disable Receiver and Transmitter */ 3525 ctl = macb_readl(lp, NCR); 3526 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3527 3528 /* Disable MAC interrupts */ 3529 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3530 MACB_BIT(RXUBR) | 3531 MACB_BIT(ISR_TUND) | 3532 MACB_BIT(ISR_RLE) | 3533 MACB_BIT(TCOMP) | 3534 MACB_BIT(ISR_ROVR) | 3535 MACB_BIT(HRESP)); 3536 3537 netif_stop_queue(dev); 3538 3539 dma_free_coherent(&lp->pdev->dev, 3540 AT91ETHER_MAX_RX_DESCR * 3541 macb_dma_desc_get_size(lp), 3542 q->rx_ring, q->rx_ring_dma); 3543 q->rx_ring = NULL; 3544 3545 dma_free_coherent(&lp->pdev->dev, 3546 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3547 q->rx_buffers, q->rx_buffers_dma); 3548 q->rx_buffers = NULL; 3549 3550 return 0; 3551 } 3552 3553 /* Transmit packet */ 3554 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) 3555 { 3556 struct macb *lp = netdev_priv(dev); 3557 3558 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3559 netif_stop_queue(dev); 3560 3561 /* Store packet information (to free when Tx completed) */ 3562 lp->skb = skb; 3563 lp->skb_length = skb->len; 3564 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 3565 DMA_TO_DEVICE); 3566 if (dma_mapping_error(NULL, lp->skb_physaddr)) { 3567 dev_kfree_skb_any(skb); 3568 dev->stats.tx_dropped++; 3569 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3570 return NETDEV_TX_OK; 3571 } 3572 3573 /* Set address of the data in the Transmit Address register */ 3574 macb_writel(lp, TAR, lp->skb_physaddr); 3575 /* Set length of the packet in the Transmit Control register */ 3576 macb_writel(lp, TCR, skb->len); 3577 3578 } else { 3579 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3580 return NETDEV_TX_BUSY; 3581 } 3582 3583 return NETDEV_TX_OK; 3584 } 3585 3586 /* Extract received frame from buffer descriptors and sent to upper layers. 
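 * Each frame is copied out of its fixed DMA buffer into a freshly
 * allocated skb, so the descriptor can be handed straight back to the
 * MAC by clearing its RX_USED bit.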
3587 * (Called from interrupt context) 3588 */ 3589 static void at91ether_rx(struct net_device *dev) 3590 { 3591 struct macb *lp = netdev_priv(dev); 3592 struct macb_queue *q = &lp->queues[0]; 3593 struct macb_dma_desc *desc; 3594 unsigned char *p_recv; 3595 struct sk_buff *skb; 3596 unsigned int pktlen; 3597 3598 desc = macb_rx_desc(q, q->rx_tail); 3599 while (desc->addr & MACB_BIT(RX_USED)) { 3600 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3601 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3602 skb = netdev_alloc_skb(dev, pktlen + 2); 3603 if (skb) { 3604 skb_reserve(skb, 2); 3605 skb_put_data(skb, p_recv, pktlen); 3606 3607 skb->protocol = eth_type_trans(skb, dev); 3608 dev->stats.rx_packets++; 3609 dev->stats.rx_bytes += pktlen; 3610 netif_rx(skb); 3611 } else { 3612 dev->stats.rx_dropped++; 3613 } 3614 3615 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3616 dev->stats.multicast++; 3617 3618 /* reset ownership bit */ 3619 desc->addr &= ~MACB_BIT(RX_USED); 3620 3621 /* wrap after last buffer */ 3622 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3623 q->rx_tail = 0; 3624 else 3625 q->rx_tail++; 3626 3627 desc = macb_rx_desc(q, q->rx_tail); 3628 } 3629 } 3630 3631 /* MAC interrupt handler */ 3632 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3633 { 3634 struct net_device *dev = dev_id; 3635 struct macb *lp = netdev_priv(dev); 3636 u32 intstatus, ctl; 3637 3638 /* MAC Interrupt Status register indicates what interrupts are pending. 3639 * It is automatically cleared once read. 3640 */ 3641 intstatus = macb_readl(lp, ISR); 3642 3643 /* Receive complete */ 3644 if (intstatus & MACB_BIT(RCOMP)) 3645 at91ether_rx(dev); 3646 3647 /* Transmit complete */ 3648 if (intstatus & MACB_BIT(TCOMP)) { 3649 /* The TCOM bit is set even if the transmission failed */ 3650 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3651 dev->stats.tx_errors++; 3652 3653 if (lp->skb) { 3654 dev_kfree_skb_irq(lp->skb); 3655 lp->skb = NULL; 3656 dma_unmap_single(NULL, lp->skb_physaddr, 3657 lp->skb_length, DMA_TO_DEVICE); 3658 dev->stats.tx_packets++; 3659 dev->stats.tx_bytes += lp->skb_length; 3660 } 3661 netif_wake_queue(dev); 3662 } 3663 3664 /* Work-around for EMAC Errata section 41.3.1 */ 3665 if (intstatus & MACB_BIT(RXUBR)) { 3666 ctl = macb_readl(lp, NCR); 3667 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 3668 wmb(); 3669 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 3670 } 3671 3672 if (intstatus & MACB_BIT(ISR_ROVR)) 3673 netdev_err(dev, "ROVR error\n"); 3674 3675 return IRQ_HANDLED; 3676 } 3677 3678 #ifdef CONFIG_NET_POLL_CONTROLLER 3679 static void at91ether_poll_controller(struct net_device *dev) 3680 { 3681 unsigned long flags; 3682 3683 local_irq_save(flags); 3684 at91ether_interrupt(dev->irq, dev); 3685 local_irq_restore(flags); 3686 } 3687 #endif 3688 3689 static const struct net_device_ops at91ether_netdev_ops = { 3690 .ndo_open = at91ether_open, 3691 .ndo_stop = at91ether_close, 3692 .ndo_start_xmit = at91ether_start_xmit, 3693 .ndo_get_stats = macb_get_stats, 3694 .ndo_set_rx_mode = macb_set_rx_mode, 3695 .ndo_set_mac_address = eth_mac_addr, 3696 .ndo_do_ioctl = macb_ioctl, 3697 .ndo_validate_addr = eth_validate_addr, 3698 #ifdef CONFIG_NET_POLL_CONTROLLER 3699 .ndo_poll_controller = at91ether_poll_controller, 3700 #endif 3701 }; 3702 3703 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 3704 struct clk **hclk, struct clk **tx_clk, 3705 struct clk **rx_clk) 3706 { 3707 int err; 3708 3709 *hclk = NULL; 3710 *tx_clk = NULL; 3711 *rx_clk = NULL; 
3712 3713 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 3714 if (IS_ERR(*pclk)) 3715 return PTR_ERR(*pclk); 3716 3717 err = clk_prepare_enable(*pclk); 3718 if (err) { 3719 dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3720 return err; 3721 } 3722 3723 return 0; 3724 } 3725 3726 static int at91ether_init(struct platform_device *pdev) 3727 { 3728 struct net_device *dev = platform_get_drvdata(pdev); 3729 struct macb *bp = netdev_priv(dev); 3730 int err; 3731 u32 reg; 3732 3733 dev->netdev_ops = &at91ether_netdev_ops; 3734 dev->ethtool_ops = &macb_ethtool_ops; 3735 3736 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 3737 0, dev->name, dev); 3738 if (err) 3739 return err; 3740 3741 macb_writel(bp, NCR, 0); 3742 3743 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); 3744 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 3745 reg |= MACB_BIT(RM9200_RMII); 3746 3747 macb_writel(bp, NCFGR, reg); 3748 3749 return 0; 3750 } 3751 3752 static const struct macb_config at91sam9260_config = { 3753 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3754 .clk_init = macb_clk_init, 3755 .init = macb_init, 3756 }; 3757 3758 static const struct macb_config pc302gem_config = { 3759 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3760 .dma_burst_length = 16, 3761 .clk_init = macb_clk_init, 3762 .init = macb_init, 3763 }; 3764 3765 static const struct macb_config sama5d2_config = { 3766 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3767 .dma_burst_length = 16, 3768 .clk_init = macb_clk_init, 3769 .init = macb_init, 3770 }; 3771 3772 static const struct macb_config sama5d3_config = { 3773 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE 3774 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, 3775 .dma_burst_length = 16, 3776 .clk_init = macb_clk_init, 3777 .init = macb_init, 3778 .jumbo_max_len = 10240, 3779 }; 3780 3781 static const struct macb_config sama5d4_config = { 3782 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 3783 .dma_burst_length = 4, 3784 .clk_init = macb_clk_init, 3785 .init = macb_init, 3786 }; 3787 3788 static const struct macb_config emac_config = { 3789 .clk_init = at91ether_clk_init, 3790 .init = at91ether_init, 3791 }; 3792 3793 static const struct macb_config np4_config = { 3794 .caps = MACB_CAPS_USRIO_DISABLED, 3795 .clk_init = macb_clk_init, 3796 .init = macb_init, 3797 }; 3798 3799 static const struct macb_config zynqmp_config = { 3800 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3801 MACB_CAPS_JUMBO | 3802 MACB_CAPS_GEM_HAS_PTP, 3803 .dma_burst_length = 16, 3804 .clk_init = macb_clk_init, 3805 .init = macb_init, 3806 .jumbo_max_len = 10240, 3807 }; 3808 3809 static const struct macb_config zynq_config = { 3810 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3811 .dma_burst_length = 16, 3812 .clk_init = macb_clk_init, 3813 .init = macb_init, 3814 }; 3815 3816 static const struct of_device_id macb_dt_ids[] = { 3817 { .compatible = "cdns,at32ap7000-macb" }, 3818 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 3819 { .compatible = "cdns,macb" }, 3820 { .compatible = "cdns,np4-macb", .data = &np4_config }, 3821 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 3822 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3823 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3824 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3825 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3826 { .compatible = 
"cdns,at91rm9200-emac", .data = &emac_config }, 3827 { .compatible = "cdns,emac", .data = &emac_config }, 3828 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 3829 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 3830 { /* sentinel */ } 3831 }; 3832 MODULE_DEVICE_TABLE(of, macb_dt_ids); 3833 #endif /* CONFIG_OF */ 3834 3835 static const struct macb_config default_gem_config = { 3836 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3837 MACB_CAPS_JUMBO | 3838 MACB_CAPS_GEM_HAS_PTP, 3839 .dma_burst_length = 16, 3840 .clk_init = macb_clk_init, 3841 .init = macb_init, 3842 .jumbo_max_len = 10240, 3843 }; 3844 3845 static int macb_probe(struct platform_device *pdev) 3846 { 3847 const struct macb_config *macb_config = &default_gem_config; 3848 int (*clk_init)(struct platform_device *, struct clk **, 3849 struct clk **, struct clk **, struct clk **) 3850 = macb_config->clk_init; 3851 int (*init)(struct platform_device *) = macb_config->init; 3852 struct device_node *np = pdev->dev.of_node; 3853 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 3854 unsigned int queue_mask, num_queues; 3855 struct macb_platform_data *pdata; 3856 bool native_io; 3857 struct phy_device *phydev; 3858 struct net_device *dev; 3859 struct resource *regs; 3860 void __iomem *mem; 3861 const char *mac; 3862 struct macb *bp; 3863 int err; 3864 3865 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3866 mem = devm_ioremap_resource(&pdev->dev, regs); 3867 if (IS_ERR(mem)) 3868 return PTR_ERR(mem); 3869 3870 if (np) { 3871 const struct of_device_id *match; 3872 3873 match = of_match_node(macb_dt_ids, np); 3874 if (match && match->data) { 3875 macb_config = match->data; 3876 clk_init = macb_config->clk_init; 3877 init = macb_config->init; 3878 } 3879 } 3880 3881 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); 3882 if (err) 3883 return err; 3884 3885 native_io = hw_is_native_io(mem); 3886 3887 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 3888 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 3889 if (!dev) { 3890 err = -ENOMEM; 3891 goto err_disable_clocks; 3892 } 3893 3894 dev->base_addr = regs->start; 3895 3896 SET_NETDEV_DEV(dev, &pdev->dev); 3897 3898 bp = netdev_priv(dev); 3899 bp->pdev = pdev; 3900 bp->dev = dev; 3901 bp->regs = mem; 3902 bp->native_io = native_io; 3903 if (native_io) { 3904 bp->macb_reg_readl = hw_readl_native; 3905 bp->macb_reg_writel = hw_writel_native; 3906 } else { 3907 bp->macb_reg_readl = hw_readl; 3908 bp->macb_reg_writel = hw_writel; 3909 } 3910 bp->num_queues = num_queues; 3911 bp->queue_mask = queue_mask; 3912 if (macb_config) 3913 bp->dma_burst_length = macb_config->dma_burst_length; 3914 bp->pclk = pclk; 3915 bp->hclk = hclk; 3916 bp->tx_clk = tx_clk; 3917 bp->rx_clk = rx_clk; 3918 if (macb_config) 3919 bp->jumbo_max_len = macb_config->jumbo_max_len; 3920 3921 bp->wol = 0; 3922 if (of_get_property(np, "magic-packet", NULL)) 3923 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 3924 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3925 3926 spin_lock_init(&bp->lock); 3927 3928 /* setup capabilities */ 3929 macb_configure_caps(bp, macb_config); 3930 3931 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3932 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 3933 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3934 bp->hw_dma_cap |= HW_DMA_CAP_64B; 3935 } 3936 #endif 3937 platform_set_drvdata(pdev, dev); 3938 3939 dev->irq = platform_get_irq(pdev, 0); 3940 if (dev->irq < 0) { 3941 err = dev->irq; 3942 goto err_out_free_netdev; 3943 } 3944 3945 /* MTU 
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}
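/* Reverse of macb_suspend(): disarm the wake-on-LAN interrupt and wake
 * source when magic-packet wake was enabled, otherwise re-enable the
 * clocks that were gated on suspend, then reattach the interface.
 */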
static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");