1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Cadence MACB/GEM Ethernet Controller driver 4 * 5 * Copyright (C) 2004-2006 Atmel Corporation 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 #include <linux/clk.h> 10 #include <linux/clk-provider.h> 11 #include <linux/crc32.h> 12 #include <linux/module.h> 13 #include <linux/moduleparam.h> 14 #include <linux/kernel.h> 15 #include <linux/types.h> 16 #include <linux/circ_buf.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/io.h> 20 #include <linux/gpio.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/interrupt.h> 23 #include <linux/netdevice.h> 24 #include <linux/etherdevice.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/platform_data/macb.h> 27 #include <linux/platform_device.h> 28 #include <linux/phy.h> 29 #include <linux/of.h> 30 #include <linux/of_device.h> 31 #include <linux/of_gpio.h> 32 #include <linux/of_mdio.h> 33 #include <linux/of_net.h> 34 #include <linux/ip.h> 35 #include <linux/udp.h> 36 #include <linux/tcp.h> 37 #include <linux/iopoll.h> 38 #include <linux/pm_runtime.h> 39 #include "macb.h" 40 41 /* This structure is only used for MACB on SiFive FU540 devices */ 42 struct sifive_fu540_macb_mgmt { 43 void __iomem *reg; 44 unsigned long rate; 45 struct clk_hw hw; 46 }; 47 48 static struct sifive_fu540_macb_mgmt *mgmt; 49 50 #define MACB_RX_BUFFER_SIZE 128 51 #define RX_BUFFER_MULTIPLE 64 /* bytes */ 52 53 #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 54 #define MIN_RX_RING_SIZE 64 55 #define MAX_RX_RING_SIZE 8192 56 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 57 * (bp)->rx_ring_size) 58 59 #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 60 #define MIN_TX_RING_SIZE 64 61 #define MAX_TX_RING_SIZE 4096 62 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ 63 * (bp)->tx_ring_size) 64 65 /* level of occupied TX descriptors under which we wake up TX process */ 66 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 67 68 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) 69 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 70 | MACB_BIT(ISR_RLE) \ 71 | MACB_BIT(TXERR)) 72 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \ 73 | MACB_BIT(TXUBR)) 74 75 /* Max length of transmit frame must be a multiple of 8 bytes */ 76 #define MACB_TX_LEN_ALIGN 8 77 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 78 #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 79 80 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU 81 #define MACB_NETIF_LSO NETIF_F_TSO 82 83 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 84 #define MACB_WOL_ENABLED (0x1 << 1) 85 86 /* Graceful stop timeouts in us. We should allow up to 87 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) 88 */ 89 #define MACB_HALT_TIMEOUT 1230 90 91 #define MACB_PM_TIMEOUT 100 /* ms */ 92 93 #define MACB_MDIO_TIMEOUT 1000000 /* in usecs */ 94 95 /* DMA buffer descriptor might be different size 96 * depends on hardware configuration: 97 * 98 * 1. dma address width 32 bits: 99 * word 1: 32 bit address of Data Buffer 100 * word 2: control 101 * 102 * 2. dma address width 64 bits: 103 * word 1: 32 bit address of Data Buffer 104 * word 2: control 105 * word 3: upper 32 bit address of Data Buffer 106 * word 4: unused 107 * 108 * 3. dma address width 32 bits with hardware timestamping: 109 * word 1: 32 bit address of Data Buffer 110 * word 2: control 111 * word 3: timestamp word 1 112 * word 4: timestamp word 2 113 * 114 * 4. dma address width 64 bits with hardware timestamping: 115 * word 1: 32 bit address of Data Buffer 116 * word 2: control 117 * word 3: upper 32 bit address of Data Buffer 118 * word 4: unused 119 * word 5: timestamp word 1 120 * word 6: timestamp word 2 121 */ 122 static unsigned int macb_dma_desc_get_size(struct macb *bp) 123 { 124 #ifdef MACB_EXT_DESC 125 unsigned int desc_size; 126 127 switch (bp->hw_dma_cap) { 128 case HW_DMA_CAP_64B: 129 desc_size = sizeof(struct macb_dma_desc) 130 + sizeof(struct macb_dma_desc_64); 131 break; 132 case HW_DMA_CAP_PTP: 133 desc_size = sizeof(struct macb_dma_desc) 134 + sizeof(struct macb_dma_desc_ptp); 135 break; 136 case HW_DMA_CAP_64B_PTP: 137 desc_size = sizeof(struct macb_dma_desc) 138 + sizeof(struct macb_dma_desc_64) 139 + sizeof(struct macb_dma_desc_ptp); 140 break; 141 default: 142 desc_size = sizeof(struct macb_dma_desc); 143 } 144 return desc_size; 145 #endif 146 return sizeof(struct macb_dma_desc); 147 } 148 149 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 150 { 151 #ifdef MACB_EXT_DESC 152 switch (bp->hw_dma_cap) { 153 case HW_DMA_CAP_64B: 154 case HW_DMA_CAP_PTP: 155 desc_idx <<= 1; 156 break; 157 case HW_DMA_CAP_64B_PTP: 158 desc_idx *= 3; 159 break; 160 default: 161 break; 162 } 163 #endif 164 return desc_idx; 165 } 166 167 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 168 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 169 { 170 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 171 return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); 172 return NULL; 173 } 174 #endif 175 176 /* Ring buffer accessors */ 177 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 178 { 179 return index & (bp->tx_ring_size - 1); 180 } 181 182 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 183 unsigned int index) 184 { 185 index = macb_tx_ring_wrap(queue->bp, index); 186 index = macb_adj_dma_desc_idx(queue->bp, index); 187 return &queue->tx_ring[index]; 188 } 189 190 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 191 unsigned int index) 192 { 193 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; 194 } 195 196 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 197 { 198 dma_addr_t offset; 199 200 offset = macb_tx_ring_wrap(queue->bp, index) * 201 macb_dma_desc_get_size(queue->bp); 202 203 return queue->tx_ring_dma + offset; 204 } 205 206 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) 207 { 208 return index & (bp->rx_ring_size - 1); 209 } 210 211 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) 212 { 213 index = macb_rx_ring_wrap(queue->bp, index); 214 index = macb_adj_dma_desc_idx(queue->bp, index); 215 return &queue->rx_ring[index]; 216 } 217 218 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) 219 { 220 return queue->rx_buffers + queue->bp->rx_buffer_size * 221 macb_rx_ring_wrap(queue->bp, index); 222 } 223 224 /* I/O accessors */ 225 static u32 hw_readl_native(struct macb *bp, int offset) 226 { 227 return __raw_readl(bp->regs + offset); 228 } 229 230 static void hw_writel_native(struct macb *bp, int offset, u32 value) 231 { 232 __raw_writel(value, bp->regs + offset); 233 } 234 235 static u32 hw_readl(struct macb *bp, int offset) 236 { 237 return readl_relaxed(bp->regs + offset); 238 } 239 240 static void hw_writel(struct macb *bp, int offset, u32 value) 241 { 242 writel_relaxed(value, bp->regs + offset); 243 } 244 245 /* Find the CPU endianness by using the loopback bit of NCR register. When the 246 * CPU is in big endian we need to program swapped mode for management 247 * descriptor access. 248 */ 249 static bool hw_is_native_io(void __iomem *addr) 250 { 251 u32 value = MACB_BIT(LLB); 252 253 __raw_writel(value, addr + MACB_NCR); 254 value = __raw_readl(addr + MACB_NCR); 255 256 /* Write 0 back to disable everything */ 257 __raw_writel(0, addr + MACB_NCR); 258 259 return value == MACB_BIT(LLB); 260 } 261 262 static bool hw_is_gem(void __iomem *addr, bool native_io) 263 { 264 u32 id; 265 266 if (native_io) 267 id = __raw_readl(addr + MACB_MID); 268 else 269 id = readl_relaxed(addr + MACB_MID); 270 271 return MACB_BFEXT(IDNUM, id) >= 0x2; 272 } 273 274 static void macb_set_hwaddr(struct macb *bp) 275 { 276 u32 bottom; 277 u16 top; 278 279 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 280 macb_or_gem_writel(bp, SA1B, bottom); 281 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 282 macb_or_gem_writel(bp, SA1T, top); 283 284 /* Clear unused address register sets */ 285 macb_or_gem_writel(bp, SA2B, 0); 286 macb_or_gem_writel(bp, SA2T, 0); 287 macb_or_gem_writel(bp, SA3B, 0); 288 macb_or_gem_writel(bp, SA3T, 0); 289 macb_or_gem_writel(bp, SA4B, 0); 290 macb_or_gem_writel(bp, SA4T, 0); 291 } 292 293 static void macb_get_hwaddr(struct macb *bp) 294 { 295 u32 bottom; 296 u16 top; 297 u8 addr[6]; 298 int i; 299 300 /* Check all 4 address register for valid address */ 301 for (i = 0; i < 4; i++) { 302 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 303 top = macb_or_gem_readl(bp, SA1T + i * 8); 304 305 addr[0] = bottom & 0xff; 306 addr[1] = (bottom >> 8) & 0xff; 307 addr[2] = (bottom >> 16) & 0xff; 308 addr[3] = (bottom >> 24) & 0xff; 309 addr[4] = top & 0xff; 310 addr[5] = (top >> 8) & 0xff; 311 312 if (is_valid_ether_addr(addr)) { 313 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 314 return; 315 } 316 } 317 318 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 319 eth_hw_addr_random(bp->dev); 320 } 321 322 static int macb_mdio_wait_for_idle(struct macb *bp) 323 { 324 u32 val; 325 326 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), 327 1, MACB_MDIO_TIMEOUT); 328 } 329 330 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 331 { 332 struct macb *bp = bus->priv; 333 int status; 334 335 status = pm_runtime_get_sync(&bp->pdev->dev); 336 if (status < 0) 337 goto mdio_pm_exit; 338 339 status = macb_mdio_wait_for_idle(bp); 340 if (status < 0) 341 goto mdio_read_exit; 342 343 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 344 | MACB_BF(RW, MACB_MAN_READ) 345 | MACB_BF(PHYA, mii_id) 346 | MACB_BF(REGA, regnum) 347 | MACB_BF(CODE, MACB_MAN_CODE))); 348 349 status = macb_mdio_wait_for_idle(bp); 350 if (status < 0) 351 goto mdio_read_exit; 352 353 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 354 355 mdio_read_exit: 356 pm_runtime_mark_last_busy(&bp->pdev->dev); 357 pm_runtime_put_autosuspend(&bp->pdev->dev); 358 mdio_pm_exit: 359 return status; 360 } 361 362 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 363 u16 value) 364 { 365 struct macb *bp = bus->priv; 366 int status; 367 368 status = pm_runtime_get_sync(&bp->pdev->dev); 369 if (status < 0) 370 goto mdio_pm_exit; 371 372 status = macb_mdio_wait_for_idle(bp); 373 if (status < 0) 374 goto mdio_write_exit; 375 376 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 377 | MACB_BF(RW, MACB_MAN_WRITE) 378 | MACB_BF(PHYA, mii_id) 379 | MACB_BF(REGA, regnum) 380 | MACB_BF(CODE, MACB_MAN_CODE) 381 | MACB_BF(DATA, value))); 382 383 status = macb_mdio_wait_for_idle(bp); 384 if (status < 0) 385 goto mdio_write_exit; 386 387 mdio_write_exit: 388 pm_runtime_mark_last_busy(&bp->pdev->dev); 389 pm_runtime_put_autosuspend(&bp->pdev->dev); 390 mdio_pm_exit: 391 return status; 392 } 393 394 /** 395 * macb_set_tx_clk() - Set a clock to a new frequency 396 * @clk Pointer to the clock to change 397 * @rate New frequency in Hz 398 * @dev Pointer to the struct net_device 399 */ 400 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) 401 { 402 long ferr, rate, rate_rounded; 403 404 if (!clk) 405 return; 406 407 switch (speed) { 408 case SPEED_10: 409 rate = 2500000; 410 break; 411 case SPEED_100: 412 rate = 25000000; 413 break; 414 case SPEED_1000: 415 rate = 125000000; 416 break; 417 default: 418 return; 419 } 420 421 rate_rounded = clk_round_rate(clk, rate); 422 if (rate_rounded < 0) 423 return; 424 425 /* RGMII allows 50 ppm frequency error. Test and warn if this limit 426 * is not satisfied. 427 */ 428 ferr = abs(rate_rounded - rate); 429 ferr = DIV_ROUND_UP(ferr, rate / 100000); 430 if (ferr > 5) 431 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 432 rate); 433 434 if (clk_set_rate(clk, rate_rounded)) 435 netdev_err(dev, "adjusting tx_clk failed.\n"); 436 } 437 438 static void macb_handle_link_change(struct net_device *dev) 439 { 440 struct macb *bp = netdev_priv(dev); 441 struct phy_device *phydev = dev->phydev; 442 unsigned long flags; 443 int status_change = 0; 444 445 spin_lock_irqsave(&bp->lock, flags); 446 447 if (phydev->link) { 448 if ((bp->speed != phydev->speed) || 449 (bp->duplex != phydev->duplex)) { 450 u32 reg; 451 452 reg = macb_readl(bp, NCFGR); 453 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 454 if (macb_is_gem(bp)) 455 reg &= ~GEM_BIT(GBE); 456 457 if (phydev->duplex) 458 reg |= MACB_BIT(FD); 459 if (phydev->speed == SPEED_100) 460 reg |= MACB_BIT(SPD); 461 if (phydev->speed == SPEED_1000 && 462 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 463 reg |= GEM_BIT(GBE); 464 465 macb_or_gem_writel(bp, NCFGR, reg); 466 467 bp->speed = phydev->speed; 468 bp->duplex = phydev->duplex; 469 status_change = 1; 470 } 471 } 472 473 if (phydev->link != bp->link) { 474 if (!phydev->link) { 475 bp->speed = 0; 476 bp->duplex = -1; 477 } 478 bp->link = phydev->link; 479 480 status_change = 1; 481 } 482 483 spin_unlock_irqrestore(&bp->lock, flags); 484 485 if (status_change) { 486 if (phydev->link) { 487 /* Update the TX clock rate if and only if the link is 488 * up and there has been a link change. 489 */ 490 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); 491 492 netif_carrier_on(dev); 493 netdev_info(dev, "link up (%d/%s)\n", 494 phydev->speed, 495 phydev->duplex == DUPLEX_FULL ? 496 "Full" : "Half"); 497 } else { 498 netif_carrier_off(dev); 499 netdev_info(dev, "link down\n"); 500 } 501 } 502 } 503 504 /* based on au1000_eth. c*/ 505 static int macb_mii_probe(struct net_device *dev) 506 { 507 struct macb *bp = netdev_priv(dev); 508 struct phy_device *phydev; 509 struct device_node *np; 510 int ret, i; 511 512 np = bp->pdev->dev.of_node; 513 ret = 0; 514 515 if (np) { 516 if (of_phy_is_fixed_link(np)) { 517 bp->phy_node = of_node_get(np); 518 } else { 519 bp->phy_node = of_parse_phandle(np, "phy-handle", 0); 520 /* fallback to standard phy registration if no 521 * phy-handle was found nor any phy found during 522 * dt phy registration 523 */ 524 if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { 525 for (i = 0; i < PHY_MAX_ADDR; i++) { 526 phydev = mdiobus_scan(bp->mii_bus, i); 527 if (IS_ERR(phydev) && 528 PTR_ERR(phydev) != -ENODEV) { 529 ret = PTR_ERR(phydev); 530 break; 531 } 532 } 533 534 if (ret) 535 return -ENODEV; 536 } 537 } 538 } 539 540 if (bp->phy_node) { 541 phydev = of_phy_connect(dev, bp->phy_node, 542 &macb_handle_link_change, 0, 543 bp->phy_interface); 544 if (!phydev) 545 return -ENODEV; 546 } else { 547 phydev = phy_find_first(bp->mii_bus); 548 if (!phydev) { 549 netdev_err(dev, "no PHY found\n"); 550 return -ENXIO; 551 } 552 553 /* attach the mac to the phy */ 554 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 555 bp->phy_interface); 556 if (ret) { 557 netdev_err(dev, "Could not attach to PHY\n"); 558 return ret; 559 } 560 } 561 562 /* mask with MAC supported features */ 563 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) 564 phy_set_max_speed(phydev, SPEED_1000); 565 else 566 phy_set_max_speed(phydev, SPEED_100); 567 568 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) 569 phy_remove_link_mode(phydev, 570 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 571 572 bp->link = 0; 573 bp->speed = 0; 574 bp->duplex = -1; 575 576 return 0; 577 } 578 579 static int macb_mii_init(struct macb *bp) 580 { 581 struct device_node *np; 582 int err = -ENXIO; 583 584 /* Enable management port */ 585 macb_writel(bp, NCR, MACB_BIT(MPE)); 586 587 bp->mii_bus = mdiobus_alloc(); 588 if (!bp->mii_bus) { 589 err = -ENOMEM; 590 goto err_out; 591 } 592 593 bp->mii_bus->name = "MACB_mii_bus"; 594 bp->mii_bus->read = &macb_mdio_read; 595 bp->mii_bus->write = &macb_mdio_write; 596 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 597 bp->pdev->name, bp->pdev->id); 598 bp->mii_bus->priv = bp; 599 bp->mii_bus->parent = &bp->pdev->dev; 600 601 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 602 603 np = bp->pdev->dev.of_node; 604 if (np && of_phy_is_fixed_link(np)) { 605 if (of_phy_register_fixed_link(np) < 0) { 606 dev_err(&bp->pdev->dev, 607 "broken fixed-link specification %pOF\n", np); 608 goto err_out_free_mdiobus; 609 } 610 611 err = mdiobus_register(bp->mii_bus); 612 } else { 613 err = of_mdiobus_register(bp->mii_bus, np); 614 } 615 616 if (err) 617 goto err_out_free_fixed_link; 618 619 err = macb_mii_probe(bp->dev); 620 if (err) 621 goto err_out_unregister_bus; 622 623 return 0; 624 625 err_out_unregister_bus: 626 mdiobus_unregister(bp->mii_bus); 627 err_out_free_fixed_link: 628 if (np && of_phy_is_fixed_link(np)) 629 of_phy_deregister_fixed_link(np); 630 err_out_free_mdiobus: 631 of_node_put(bp->phy_node); 632 mdiobus_free(bp->mii_bus); 633 err_out: 634 return err; 635 } 636 637 static void macb_update_stats(struct macb *bp) 638 { 639 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 640 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 641 int offset = MACB_PFR; 642 643 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 644 645 for (; p < end; p++, offset += 4) 646 *p += bp->macb_reg_readl(bp, offset); 647 } 648 649 static int macb_halt_tx(struct macb *bp) 650 { 651 unsigned long halt_time, timeout; 652 u32 status; 653 654 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 655 656 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 657 do { 658 halt_time = jiffies; 659 status = macb_readl(bp, TSR); 660 if (!(status & MACB_BIT(TGO))) 661 return 0; 662 663 udelay(250); 664 } while (time_before(halt_time, timeout)); 665 666 return -ETIMEDOUT; 667 } 668 669 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 670 { 671 if (tx_skb->mapping) { 672 if (tx_skb->mapped_as_page) 673 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 674 tx_skb->size, DMA_TO_DEVICE); 675 else 676 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 677 tx_skb->size, DMA_TO_DEVICE); 678 tx_skb->mapping = 0; 679 } 680 681 if (tx_skb->skb) { 682 dev_kfree_skb_any(tx_skb->skb); 683 tx_skb->skb = NULL; 684 } 685 } 686 687 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 688 { 689 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 690 struct macb_dma_desc_64 *desc_64; 691 692 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 693 desc_64 = macb_64b_desc(bp, desc); 694 desc_64->addrh = upper_32_bits(addr); 695 /* The low bits of RX address contain the RX_USED bit, clearing 696 * of which allows packet RX. Make sure the high bits are also 697 * visible to HW at that point. 698 */ 699 dma_wmb(); 700 } 701 #endif 702 desc->addr = lower_32_bits(addr); 703 } 704 705 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 706 { 707 dma_addr_t addr = 0; 708 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 709 struct macb_dma_desc_64 *desc_64; 710 711 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 712 desc_64 = macb_64b_desc(bp, desc); 713 addr = ((u64)(desc_64->addrh) << 32); 714 } 715 #endif 716 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 717 return addr; 718 } 719 720 static void macb_tx_error_task(struct work_struct *work) 721 { 722 struct macb_queue *queue = container_of(work, struct macb_queue, 723 tx_error_task); 724 struct macb *bp = queue->bp; 725 struct macb_tx_skb *tx_skb; 726 struct macb_dma_desc *desc; 727 struct sk_buff *skb; 728 unsigned int tail; 729 unsigned long flags; 730 731 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 732 (unsigned int)(queue - bp->queues), 733 queue->tx_tail, queue->tx_head); 734 735 /* Prevent the queue IRQ handlers from running: each of them may call 736 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 737 * As explained below, we have to halt the transmission before updating 738 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 739 * network engine about the macb/gem being halted. 740 */ 741 spin_lock_irqsave(&bp->lock, flags); 742 743 /* Make sure nobody is trying to queue up new packets */ 744 netif_tx_stop_all_queues(bp->dev); 745 746 /* Stop transmission now 747 * (in case we have just queued new packets) 748 * macb/gem must be halted to write TBQP register 749 */ 750 if (macb_halt_tx(bp)) 751 /* Just complain for now, reinitializing TX path can be good */ 752 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 753 754 /* Treat frames in TX queue including the ones that caused the error. 755 * Free transmit buffers in upper layer. 756 */ 757 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 758 u32 ctrl; 759 760 desc = macb_tx_desc(queue, tail); 761 ctrl = desc->ctrl; 762 tx_skb = macb_tx_skb(queue, tail); 763 skb = tx_skb->skb; 764 765 if (ctrl & MACB_BIT(TX_USED)) { 766 /* skb is set for the last buffer of the frame */ 767 while (!skb) { 768 macb_tx_unmap(bp, tx_skb); 769 tail++; 770 tx_skb = macb_tx_skb(queue, tail); 771 skb = tx_skb->skb; 772 } 773 774 /* ctrl still refers to the first buffer descriptor 775 * since it's the only one written back by the hardware 776 */ 777 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 778 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 779 macb_tx_ring_wrap(bp, tail), 780 skb->data); 781 bp->dev->stats.tx_packets++; 782 queue->stats.tx_packets++; 783 bp->dev->stats.tx_bytes += skb->len; 784 queue->stats.tx_bytes += skb->len; 785 } 786 } else { 787 /* "Buffers exhausted mid-frame" errors may only happen 788 * if the driver is buggy, so complain loudly about 789 * those. Statistics are updated by hardware. 790 */ 791 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 792 netdev_err(bp->dev, 793 "BUG: TX buffers exhausted mid-frame\n"); 794 795 desc->ctrl = ctrl | MACB_BIT(TX_USED); 796 } 797 798 macb_tx_unmap(bp, tx_skb); 799 } 800 801 /* Set end of TX queue */ 802 desc = macb_tx_desc(queue, 0); 803 macb_set_addr(bp, desc, 0); 804 desc->ctrl = MACB_BIT(TX_USED); 805 806 /* Make descriptor updates visible to hardware */ 807 wmb(); 808 809 /* Reinitialize the TX desc queue */ 810 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 811 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 812 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 813 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 814 #endif 815 /* Make TX ring reflect state of hardware */ 816 queue->tx_head = 0; 817 queue->tx_tail = 0; 818 819 /* Housework before enabling TX IRQ */ 820 macb_writel(bp, TSR, macb_readl(bp, TSR)); 821 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 822 823 /* Now we are ready to start transmission again */ 824 netif_tx_start_all_queues(bp->dev); 825 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 826 827 spin_unlock_irqrestore(&bp->lock, flags); 828 } 829 830 static void macb_tx_interrupt(struct macb_queue *queue) 831 { 832 unsigned int tail; 833 unsigned int head; 834 u32 status; 835 struct macb *bp = queue->bp; 836 u16 queue_index = queue - bp->queues; 837 838 status = macb_readl(bp, TSR); 839 macb_writel(bp, TSR, status); 840 841 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 842 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 843 844 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 845 (unsigned long)status); 846 847 head = queue->tx_head; 848 for (tail = queue->tx_tail; tail != head; tail++) { 849 struct macb_tx_skb *tx_skb; 850 struct sk_buff *skb; 851 struct macb_dma_desc *desc; 852 u32 ctrl; 853 854 desc = macb_tx_desc(queue, tail); 855 856 /* Make hw descriptor updates visible to CPU */ 857 rmb(); 858 859 ctrl = desc->ctrl; 860 861 /* TX_USED bit is only set by hardware on the very first buffer 862 * descriptor of the transmitted frame. 863 */ 864 if (!(ctrl & MACB_BIT(TX_USED))) 865 break; 866 867 /* Process all buffers of the current transmitted frame */ 868 for (;; tail++) { 869 tx_skb = macb_tx_skb(queue, tail); 870 skb = tx_skb->skb; 871 872 /* First, update TX stats if needed */ 873 if (skb) { 874 if (unlikely(skb_shinfo(skb)->tx_flags & 875 SKBTX_HW_TSTAMP) && 876 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 877 /* skb now belongs to timestamp buffer 878 * and will be removed later 879 */ 880 tx_skb->skb = NULL; 881 } 882 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 883 macb_tx_ring_wrap(bp, tail), 884 skb->data); 885 bp->dev->stats.tx_packets++; 886 queue->stats.tx_packets++; 887 bp->dev->stats.tx_bytes += skb->len; 888 queue->stats.tx_bytes += skb->len; 889 } 890 891 /* Now we can safely release resources */ 892 macb_tx_unmap(bp, tx_skb); 893 894 /* skb is set only for the last buffer of the frame. 895 * WARNING: at this point skb has been freed by 896 * macb_tx_unmap(). 897 */ 898 if (skb) 899 break; 900 } 901 } 902 903 queue->tx_tail = tail; 904 if (__netif_subqueue_stopped(bp->dev, queue_index) && 905 CIRC_CNT(queue->tx_head, queue->tx_tail, 906 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 907 netif_wake_subqueue(bp->dev, queue_index); 908 } 909 910 static void gem_rx_refill(struct macb_queue *queue) 911 { 912 unsigned int entry; 913 struct sk_buff *skb; 914 dma_addr_t paddr; 915 struct macb *bp = queue->bp; 916 struct macb_dma_desc *desc; 917 918 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 919 bp->rx_ring_size) > 0) { 920 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 921 922 /* Make hw descriptor updates visible to CPU */ 923 rmb(); 924 925 queue->rx_prepared_head++; 926 desc = macb_rx_desc(queue, entry); 927 928 if (!queue->rx_skbuff[entry]) { 929 /* allocate sk_buff for this free entry in ring */ 930 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 931 if (unlikely(!skb)) { 932 netdev_err(bp->dev, 933 "Unable to allocate sk_buff\n"); 934 break; 935 } 936 937 /* now fill corresponding descriptor entry */ 938 paddr = dma_map_single(&bp->pdev->dev, skb->data, 939 bp->rx_buffer_size, 940 DMA_FROM_DEVICE); 941 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 942 dev_kfree_skb(skb); 943 break; 944 } 945 946 queue->rx_skbuff[entry] = skb; 947 948 if (entry == bp->rx_ring_size - 1) 949 paddr |= MACB_BIT(RX_WRAP); 950 desc->ctrl = 0; 951 /* Setting addr clears RX_USED and allows reception, 952 * make sure ctrl is cleared first to avoid a race. 953 */ 954 dma_wmb(); 955 macb_set_addr(bp, desc, paddr); 956 957 /* properly align Ethernet header */ 958 skb_reserve(skb, NET_IP_ALIGN); 959 } else { 960 desc->ctrl = 0; 961 dma_wmb(); 962 desc->addr &= ~MACB_BIT(RX_USED); 963 } 964 } 965 966 /* Make descriptor updates visible to hardware */ 967 wmb(); 968 969 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 970 queue, queue->rx_prepared_head, queue->rx_tail); 971 } 972 973 /* Mark DMA descriptors from begin up to and not including end as unused */ 974 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 975 unsigned int end) 976 { 977 unsigned int frag; 978 979 for (frag = begin; frag != end; frag++) { 980 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 981 982 desc->addr &= ~MACB_BIT(RX_USED); 983 } 984 985 /* Make descriptor updates visible to hardware */ 986 wmb(); 987 988 /* When this happens, the hardware stats registers for 989 * whatever caused this is updated, so we don't have to record 990 * anything. 991 */ 992 } 993 994 static int gem_rx(struct macb_queue *queue, int budget) 995 { 996 struct macb *bp = queue->bp; 997 unsigned int len; 998 unsigned int entry; 999 struct sk_buff *skb; 1000 struct macb_dma_desc *desc; 1001 int count = 0; 1002 1003 while (count < budget) { 1004 u32 ctrl; 1005 dma_addr_t addr; 1006 bool rxused; 1007 1008 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 1009 desc = macb_rx_desc(queue, entry); 1010 1011 /* Make hw descriptor updates visible to CPU */ 1012 rmb(); 1013 1014 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 1015 addr = macb_get_addr(bp, desc); 1016 1017 if (!rxused) 1018 break; 1019 1020 /* Ensure ctrl is at least as up-to-date as rxused */ 1021 dma_rmb(); 1022 1023 ctrl = desc->ctrl; 1024 1025 queue->rx_tail++; 1026 count++; 1027 1028 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1029 netdev_err(bp->dev, 1030 "not whole frame pointed by descriptor\n"); 1031 bp->dev->stats.rx_dropped++; 1032 queue->stats.rx_dropped++; 1033 break; 1034 } 1035 skb = queue->rx_skbuff[entry]; 1036 if (unlikely(!skb)) { 1037 netdev_err(bp->dev, 1038 "inconsistent Rx descriptor chain\n"); 1039 bp->dev->stats.rx_dropped++; 1040 queue->stats.rx_dropped++; 1041 break; 1042 } 1043 /* now everything is ready for receiving packet */ 1044 queue->rx_skbuff[entry] = NULL; 1045 len = ctrl & bp->rx_frm_len_mask; 1046 1047 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1048 1049 skb_put(skb, len); 1050 dma_unmap_single(&bp->pdev->dev, addr, 1051 bp->rx_buffer_size, DMA_FROM_DEVICE); 1052 1053 skb->protocol = eth_type_trans(skb, bp->dev); 1054 skb_checksum_none_assert(skb); 1055 if (bp->dev->features & NETIF_F_RXCSUM && 1056 !(bp->dev->flags & IFF_PROMISC) && 1057 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1058 skb->ip_summed = CHECKSUM_UNNECESSARY; 1059 1060 bp->dev->stats.rx_packets++; 1061 queue->stats.rx_packets++; 1062 bp->dev->stats.rx_bytes += skb->len; 1063 queue->stats.rx_bytes += skb->len; 1064 1065 gem_ptp_do_rxstamp(bp, skb, desc); 1066 1067 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1068 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1069 skb->len, skb->csum); 1070 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1071 skb_mac_header(skb), 16, true); 1072 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1073 skb->data, 32, true); 1074 #endif 1075 1076 netif_receive_skb(skb); 1077 } 1078 1079 gem_rx_refill(queue); 1080 1081 return count; 1082 } 1083 1084 static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, 1085 unsigned int last_frag) 1086 { 1087 unsigned int len; 1088 unsigned int frag; 1089 unsigned int offset; 1090 struct sk_buff *skb; 1091 struct macb_dma_desc *desc; 1092 struct macb *bp = queue->bp; 1093 1094 desc = macb_rx_desc(queue, last_frag); 1095 len = desc->ctrl & bp->rx_frm_len_mask; 1096 1097 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1098 macb_rx_ring_wrap(bp, first_frag), 1099 macb_rx_ring_wrap(bp, last_frag), len); 1100 1101 /* The ethernet header starts NET_IP_ALIGN bytes into the 1102 * first buffer. Since the header is 14 bytes, this makes the 1103 * payload word-aligned. 1104 * 1105 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1106 * the two padding bytes into the skb so that we avoid hitting 1107 * the slowpath in memcpy(), and pull them off afterwards. 1108 */ 1109 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1110 if (!skb) { 1111 bp->dev->stats.rx_dropped++; 1112 for (frag = first_frag; ; frag++) { 1113 desc = macb_rx_desc(queue, frag); 1114 desc->addr &= ~MACB_BIT(RX_USED); 1115 if (frag == last_frag) 1116 break; 1117 } 1118 1119 /* Make descriptor updates visible to hardware */ 1120 wmb(); 1121 1122 return 1; 1123 } 1124 1125 offset = 0; 1126 len += NET_IP_ALIGN; 1127 skb_checksum_none_assert(skb); 1128 skb_put(skb, len); 1129 1130 for (frag = first_frag; ; frag++) { 1131 unsigned int frag_len = bp->rx_buffer_size; 1132 1133 if (offset + frag_len > len) { 1134 if (unlikely(frag != last_frag)) { 1135 dev_kfree_skb_any(skb); 1136 return -1; 1137 } 1138 frag_len = len - offset; 1139 } 1140 skb_copy_to_linear_data_offset(skb, offset, 1141 macb_rx_buffer(queue, frag), 1142 frag_len); 1143 offset += bp->rx_buffer_size; 1144 desc = macb_rx_desc(queue, frag); 1145 desc->addr &= ~MACB_BIT(RX_USED); 1146 1147 if (frag == last_frag) 1148 break; 1149 } 1150 1151 /* Make descriptor updates visible to hardware */ 1152 wmb(); 1153 1154 __skb_pull(skb, NET_IP_ALIGN); 1155 skb->protocol = eth_type_trans(skb, bp->dev); 1156 1157 bp->dev->stats.rx_packets++; 1158 bp->dev->stats.rx_bytes += skb->len; 1159 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1160 skb->len, skb->csum); 1161 netif_receive_skb(skb); 1162 1163 return 0; 1164 } 1165 1166 static inline void macb_init_rx_ring(struct macb_queue *queue) 1167 { 1168 struct macb *bp = queue->bp; 1169 dma_addr_t addr; 1170 struct macb_dma_desc *desc = NULL; 1171 int i; 1172 1173 addr = queue->rx_buffers_dma; 1174 for (i = 0; i < bp->rx_ring_size; i++) { 1175 desc = macb_rx_desc(queue, i); 1176 macb_set_addr(bp, desc, addr); 1177 desc->ctrl = 0; 1178 addr += bp->rx_buffer_size; 1179 } 1180 desc->addr |= MACB_BIT(RX_WRAP); 1181 queue->rx_tail = 0; 1182 } 1183 1184 static int macb_rx(struct macb_queue *queue, int budget) 1185 { 1186 struct macb *bp = queue->bp; 1187 bool reset_rx_queue = false; 1188 int received = 0; 1189 unsigned int tail; 1190 int first_frag = -1; 1191 1192 for (tail = queue->rx_tail; budget > 0; tail++) { 1193 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1194 u32 ctrl; 1195 1196 /* Make hw descriptor updates visible to CPU */ 1197 rmb(); 1198 1199 if (!(desc->addr & MACB_BIT(RX_USED))) 1200 break; 1201 1202 /* Ensure ctrl is at least as up-to-date as addr */ 1203 dma_rmb(); 1204 1205 ctrl = desc->ctrl; 1206 1207 if (ctrl & MACB_BIT(RX_SOF)) { 1208 if (first_frag != -1) 1209 discard_partial_frame(queue, first_frag, tail); 1210 first_frag = tail; 1211 } 1212 1213 if (ctrl & MACB_BIT(RX_EOF)) { 1214 int dropped; 1215 1216 if (unlikely(first_frag == -1)) { 1217 reset_rx_queue = true; 1218 continue; 1219 } 1220 1221 dropped = macb_rx_frame(queue, first_frag, tail); 1222 first_frag = -1; 1223 if (unlikely(dropped < 0)) { 1224 reset_rx_queue = true; 1225 continue; 1226 } 1227 if (!dropped) { 1228 received++; 1229 budget--; 1230 } 1231 } 1232 } 1233 1234 if (unlikely(reset_rx_queue)) { 1235 unsigned long flags; 1236 u32 ctrl; 1237 1238 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1239 1240 spin_lock_irqsave(&bp->lock, flags); 1241 1242 ctrl = macb_readl(bp, NCR); 1243 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1244 1245 macb_init_rx_ring(queue); 1246 queue_writel(queue, RBQP, queue->rx_ring_dma); 1247 1248 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1249 1250 spin_unlock_irqrestore(&bp->lock, flags); 1251 return received; 1252 } 1253 1254 if (first_frag != -1) 1255 queue->rx_tail = first_frag; 1256 else 1257 queue->rx_tail = tail; 1258 1259 return received; 1260 } 1261 1262 static int macb_poll(struct napi_struct *napi, int budget) 1263 { 1264 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1265 struct macb *bp = queue->bp; 1266 int work_done; 1267 u32 status; 1268 1269 status = macb_readl(bp, RSR); 1270 macb_writel(bp, RSR, status); 1271 1272 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1273 (unsigned long)status, budget); 1274 1275 work_done = bp->macbgem_ops.mog_rx(queue, budget); 1276 if (work_done < budget) { 1277 napi_complete_done(napi, work_done); 1278 1279 /* Packets received while interrupts were disabled */ 1280 status = macb_readl(bp, RSR); 1281 if (status) { 1282 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1283 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1284 napi_reschedule(napi); 1285 } else { 1286 queue_writel(queue, IER, bp->rx_intr_mask); 1287 } 1288 } 1289 1290 /* TODO: Handle errors */ 1291 1292 return work_done; 1293 } 1294 1295 static void macb_hresp_error_task(unsigned long data) 1296 { 1297 struct macb *bp = (struct macb *)data; 1298 struct net_device *dev = bp->dev; 1299 struct macb_queue *queue = bp->queues; 1300 unsigned int q; 1301 u32 ctrl; 1302 1303 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1304 queue_writel(queue, IDR, bp->rx_intr_mask | 1305 MACB_TX_INT_FLAGS | 1306 MACB_BIT(HRESP)); 1307 } 1308 ctrl = macb_readl(bp, NCR); 1309 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1310 macb_writel(bp, NCR, ctrl); 1311 1312 netif_tx_stop_all_queues(dev); 1313 netif_carrier_off(dev); 1314 1315 bp->macbgem_ops.mog_init_rings(bp); 1316 1317 /* Initialize TX and RX buffers */ 1318 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1319 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 1320 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1321 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1322 queue_writel(queue, RBQPH, 1323 upper_32_bits(queue->rx_ring_dma)); 1324 #endif 1325 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 1326 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1327 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1328 queue_writel(queue, TBQPH, 1329 upper_32_bits(queue->tx_ring_dma)); 1330 #endif 1331 1332 /* Enable interrupts */ 1333 queue_writel(queue, IER, 1334 bp->rx_intr_mask | 1335 MACB_TX_INT_FLAGS | 1336 MACB_BIT(HRESP)); 1337 } 1338 1339 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1340 macb_writel(bp, NCR, ctrl); 1341 1342 netif_carrier_on(dev); 1343 netif_tx_start_all_queues(dev); 1344 } 1345 1346 static void macb_tx_restart(struct macb_queue *queue) 1347 { 1348 unsigned int head = queue->tx_head; 1349 unsigned int tail = queue->tx_tail; 1350 struct macb *bp = queue->bp; 1351 1352 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1353 queue_writel(queue, ISR, MACB_BIT(TXUBR)); 1354 1355 if (head == tail) 1356 return; 1357 1358 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1359 } 1360 1361 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1362 { 1363 struct macb_queue *queue = dev_id; 1364 struct macb *bp = queue->bp; 1365 struct net_device *dev = bp->dev; 1366 u32 status, ctrl; 1367 1368 status = queue_readl(queue, ISR); 1369 1370 if (unlikely(!status)) 1371 return IRQ_NONE; 1372 1373 spin_lock(&bp->lock); 1374 1375 while (status) { 1376 /* close possible race with dev_close */ 1377 if (unlikely(!netif_running(dev))) { 1378 queue_writel(queue, IDR, -1); 1379 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1380 queue_writel(queue, ISR, -1); 1381 break; 1382 } 1383 1384 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1385 (unsigned int)(queue - bp->queues), 1386 (unsigned long)status); 1387 1388 if (status & bp->rx_intr_mask) { 1389 /* There's no point taking any more interrupts 1390 * until we have processed the buffers. The 1391 * scheduling call may fail if the poll routine 1392 * is already scheduled, so disable interrupts 1393 * now. 1394 */ 1395 queue_writel(queue, IDR, bp->rx_intr_mask); 1396 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1397 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1398 1399 if (napi_schedule_prep(&queue->napi)) { 1400 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1401 __napi_schedule(&queue->napi); 1402 } 1403 } 1404 1405 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1406 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1407 schedule_work(&queue->tx_error_task); 1408 1409 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1410 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1411 1412 break; 1413 } 1414 1415 if (status & MACB_BIT(TCOMP)) 1416 macb_tx_interrupt(queue); 1417 1418 if (status & MACB_BIT(TXUBR)) 1419 macb_tx_restart(queue); 1420 1421 /* Link change detection isn't possible with RMII, so we'll 1422 * add that if/when we get our hands on a full-blown MII PHY. 1423 */ 1424 1425 /* There is a hardware issue under heavy load where DMA can 1426 * stop, this causes endless "used buffer descriptor read" 1427 * interrupts but it can be cleared by re-enabling RX. See 1428 * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1429 * section 16.7.4 for details. RXUBR is only enabled for 1430 * these two versions. 1431 */ 1432 if (status & MACB_BIT(RXUBR)) { 1433 ctrl = macb_readl(bp, NCR); 1434 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1435 wmb(); 1436 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1437 1438 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1439 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1440 } 1441 1442 if (status & MACB_BIT(ISR_ROVR)) { 1443 /* We missed at least one packet */ 1444 if (macb_is_gem(bp)) 1445 bp->hw_stats.gem.rx_overruns++; 1446 else 1447 bp->hw_stats.macb.rx_overruns++; 1448 1449 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1450 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1451 } 1452 1453 if (status & MACB_BIT(HRESP)) { 1454 tasklet_schedule(&bp->hresp_err_tasklet); 1455 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1456 1457 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1458 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1459 } 1460 status = queue_readl(queue, ISR); 1461 } 1462 1463 spin_unlock(&bp->lock); 1464 1465 return IRQ_HANDLED; 1466 } 1467 1468 #ifdef CONFIG_NET_POLL_CONTROLLER 1469 /* Polling receive - used by netconsole and other diagnostic tools 1470 * to allow network i/o with interrupts disabled. 1471 */ 1472 static void macb_poll_controller(struct net_device *dev) 1473 { 1474 struct macb *bp = netdev_priv(dev); 1475 struct macb_queue *queue; 1476 unsigned long flags; 1477 unsigned int q; 1478 1479 local_irq_save(flags); 1480 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1481 macb_interrupt(dev->irq, queue); 1482 local_irq_restore(flags); 1483 } 1484 #endif 1485 1486 static unsigned int macb_tx_map(struct macb *bp, 1487 struct macb_queue *queue, 1488 struct sk_buff *skb, 1489 unsigned int hdrlen) 1490 { 1491 dma_addr_t mapping; 1492 unsigned int len, entry, i, tx_head = queue->tx_head; 1493 struct macb_tx_skb *tx_skb = NULL; 1494 struct macb_dma_desc *desc; 1495 unsigned int offset, size, count = 0; 1496 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1497 unsigned int eof = 1, mss_mfs = 0; 1498 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1499 1500 /* LSO */ 1501 if (skb_shinfo(skb)->gso_size != 0) { 1502 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1503 /* UDP - UFO */ 1504 lso_ctrl = MACB_LSO_UFO_ENABLE; 1505 else 1506 /* TCP - TSO */ 1507 lso_ctrl = MACB_LSO_TSO_ENABLE; 1508 } 1509 1510 /* First, map non-paged data */ 1511 len = skb_headlen(skb); 1512 1513 /* first buffer length */ 1514 size = hdrlen; 1515 1516 offset = 0; 1517 while (len) { 1518 entry = macb_tx_ring_wrap(bp, tx_head); 1519 tx_skb = &queue->tx_skb[entry]; 1520 1521 mapping = dma_map_single(&bp->pdev->dev, 1522 skb->data + offset, 1523 size, DMA_TO_DEVICE); 1524 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1525 goto dma_error; 1526 1527 /* Save info to properly release resources */ 1528 tx_skb->skb = NULL; 1529 tx_skb->mapping = mapping; 1530 tx_skb->size = size; 1531 tx_skb->mapped_as_page = false; 1532 1533 len -= size; 1534 offset += size; 1535 count++; 1536 tx_head++; 1537 1538 size = min(len, bp->max_tx_length); 1539 } 1540 1541 /* Then, map paged data from fragments */ 1542 for (f = 0; f < nr_frags; f++) { 1543 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1544 1545 len = skb_frag_size(frag); 1546 offset = 0; 1547 while (len) { 1548 size = min(len, bp->max_tx_length); 1549 entry = macb_tx_ring_wrap(bp, tx_head); 1550 tx_skb = &queue->tx_skb[entry]; 1551 1552 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1553 offset, size, DMA_TO_DEVICE); 1554 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1555 goto dma_error; 1556 1557 /* Save info to properly release resources */ 1558 tx_skb->skb = NULL; 1559 tx_skb->mapping = mapping; 1560 tx_skb->size = size; 1561 tx_skb->mapped_as_page = true; 1562 1563 len -= size; 1564 offset += size; 1565 count++; 1566 tx_head++; 1567 } 1568 } 1569 1570 /* Should never happen */ 1571 if (unlikely(!tx_skb)) { 1572 netdev_err(bp->dev, "BUG! empty skb!\n"); 1573 return 0; 1574 } 1575 1576 /* This is the last buffer of the frame: save socket buffer */ 1577 tx_skb->skb = skb; 1578 1579 /* Update TX ring: update buffer descriptors in reverse order 1580 * to avoid race condition 1581 */ 1582 1583 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1584 * to set the end of TX queue 1585 */ 1586 i = tx_head; 1587 entry = macb_tx_ring_wrap(bp, i); 1588 ctrl = MACB_BIT(TX_USED); 1589 desc = macb_tx_desc(queue, entry); 1590 desc->ctrl = ctrl; 1591 1592 if (lso_ctrl) { 1593 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1594 /* include header and FCS in value given to h/w */ 1595 mss_mfs = skb_shinfo(skb)->gso_size + 1596 skb_transport_offset(skb) + 1597 ETH_FCS_LEN; 1598 else /* TSO */ { 1599 mss_mfs = skb_shinfo(skb)->gso_size; 1600 /* TCP Sequence Number Source Select 1601 * can be set only for TSO 1602 */ 1603 seq_ctrl = 0; 1604 } 1605 } 1606 1607 do { 1608 i--; 1609 entry = macb_tx_ring_wrap(bp, i); 1610 tx_skb = &queue->tx_skb[entry]; 1611 desc = macb_tx_desc(queue, entry); 1612 1613 ctrl = (u32)tx_skb->size; 1614 if (eof) { 1615 ctrl |= MACB_BIT(TX_LAST); 1616 eof = 0; 1617 } 1618 if (unlikely(entry == (bp->tx_ring_size - 1))) 1619 ctrl |= MACB_BIT(TX_WRAP); 1620 1621 /* First descriptor is header descriptor */ 1622 if (i == queue->tx_head) { 1623 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1624 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1625 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1626 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1627 ctrl |= MACB_BIT(TX_NOCRC); 1628 } else 1629 /* Only set MSS/MFS on payload descriptors 1630 * (second or later descriptor) 1631 */ 1632 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1633 1634 /* Set TX buffer descriptor */ 1635 macb_set_addr(bp, desc, tx_skb->mapping); 1636 /* desc->addr must be visible to hardware before clearing 1637 * 'TX_USED' bit in desc->ctrl. 1638 */ 1639 wmb(); 1640 desc->ctrl = ctrl; 1641 } while (i != queue->tx_head); 1642 1643 queue->tx_head = tx_head; 1644 1645 return count; 1646 1647 dma_error: 1648 netdev_err(bp->dev, "TX DMA map failed\n"); 1649 1650 for (i = queue->tx_head; i != tx_head; i++) { 1651 tx_skb = macb_tx_skb(queue, i); 1652 1653 macb_tx_unmap(bp, tx_skb); 1654 } 1655 1656 return 0; 1657 } 1658 1659 static netdev_features_t macb_features_check(struct sk_buff *skb, 1660 struct net_device *dev, 1661 netdev_features_t features) 1662 { 1663 unsigned int nr_frags, f; 1664 unsigned int hdrlen; 1665 1666 /* Validate LSO compatibility */ 1667 1668 /* there is only one buffer */ 1669 if (!skb_is_nonlinear(skb)) 1670 return features; 1671 1672 /* length of header */ 1673 hdrlen = skb_transport_offset(skb); 1674 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1675 hdrlen += tcp_hdrlen(skb); 1676 1677 /* For LSO: 1678 * When software supplies two or more payload buffers all payload buffers 1679 * apart from the last must be a multiple of 8 bytes in size. 1680 */ 1681 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 1682 return features & ~MACB_NETIF_LSO; 1683 1684 nr_frags = skb_shinfo(skb)->nr_frags; 1685 /* No need to check last fragment */ 1686 nr_frags--; 1687 for (f = 0; f < nr_frags; f++) { 1688 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1689 1690 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 1691 return features & ~MACB_NETIF_LSO; 1692 } 1693 return features; 1694 } 1695 1696 static inline int macb_clear_csum(struct sk_buff *skb) 1697 { 1698 /* no change for packets without checksum offloading */ 1699 if (skb->ip_summed != CHECKSUM_PARTIAL) 1700 return 0; 1701 1702 /* make sure we can modify the header */ 1703 if (unlikely(skb_cow_head(skb, 0))) 1704 return -1; 1705 1706 /* initialize checksum field 1707 * This is required - at least for Zynq, which otherwise calculates 1708 * wrong UDP header checksums for UDP packets with UDP data len <=2 1709 */ 1710 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 1711 return 0; 1712 } 1713 1714 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) 1715 { 1716 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); 1717 int padlen = ETH_ZLEN - (*skb)->len; 1718 int headroom = skb_headroom(*skb); 1719 int tailroom = skb_tailroom(*skb); 1720 struct sk_buff *nskb; 1721 u32 fcs; 1722 1723 if (!(ndev->features & NETIF_F_HW_CSUM) || 1724 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || 1725 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ 1726 return 0; 1727 1728 if (padlen <= 0) { 1729 /* FCS could be appeded to tailroom. */ 1730 if (tailroom >= ETH_FCS_LEN) 1731 goto add_fcs; 1732 /* FCS could be appeded by moving data to headroom. */ 1733 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 1734 padlen = 0; 1735 /* No room for FCS, need to reallocate skb. */ 1736 else 1737 padlen = ETH_FCS_LEN; 1738 } else { 1739 /* Add room for FCS. */ 1740 padlen += ETH_FCS_LEN; 1741 } 1742 1743 if (!cloned && headroom + tailroom >= padlen) { 1744 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 1745 skb_set_tail_pointer(*skb, (*skb)->len); 1746 } else { 1747 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 1748 if (!nskb) 1749 return -ENOMEM; 1750 1751 dev_consume_skb_any(*skb); 1752 *skb = nskb; 1753 } 1754 1755 if (padlen > ETH_FCS_LEN) 1756 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1757 1758 add_fcs: 1759 /* set FCS to packet */ 1760 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 1761 fcs = ~fcs; 1762 1763 skb_put_u8(*skb, fcs & 0xff); 1764 skb_put_u8(*skb, (fcs >> 8) & 0xff); 1765 skb_put_u8(*skb, (fcs >> 16) & 0xff); 1766 skb_put_u8(*skb, (fcs >> 24) & 0xff); 1767 1768 return 0; 1769 } 1770 1771 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1772 { 1773 u16 queue_index = skb_get_queue_mapping(skb); 1774 struct macb *bp = netdev_priv(dev); 1775 struct macb_queue *queue = &bp->queues[queue_index]; 1776 unsigned long flags; 1777 unsigned int desc_cnt, nr_frags, frag_size, f; 1778 unsigned int hdrlen; 1779 bool is_lso, is_udp = 0; 1780 netdev_tx_t ret = NETDEV_TX_OK; 1781 1782 if (macb_clear_csum(skb)) { 1783 dev_kfree_skb_any(skb); 1784 return ret; 1785 } 1786 1787 if (macb_pad_and_fcs(&skb, dev)) { 1788 dev_kfree_skb_any(skb); 1789 return ret; 1790 } 1791 1792 is_lso = (skb_shinfo(skb)->gso_size != 0); 1793 1794 if (is_lso) { 1795 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); 1796 1797 /* length of headers */ 1798 if (is_udp) 1799 /* only queue eth + ip headers separately for UDP */ 1800 hdrlen = skb_transport_offset(skb); 1801 else 1802 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1803 if (skb_headlen(skb) < hdrlen) { 1804 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1805 /* if this is required, would need to copy to single buffer */ 1806 return NETDEV_TX_BUSY; 1807 } 1808 } else 1809 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1810 1811 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1812 netdev_vdbg(bp->dev, 1813 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1814 queue_index, skb->len, skb->head, skb->data, 1815 skb_tail_pointer(skb), skb_end_pointer(skb)); 1816 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1817 skb->data, 16, true); 1818 #endif 1819 1820 /* Count how many TX buffer descriptors are needed to send this 1821 * socket buffer: skb fragments of jumbo frames may need to be 1822 * split into many buffer descriptors. 1823 */ 1824 if (is_lso && (skb_headlen(skb) > hdrlen)) 1825 /* extra header descriptor if also payload in first buffer */ 1826 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1827 else 1828 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1829 nr_frags = skb_shinfo(skb)->nr_frags; 1830 for (f = 0; f < nr_frags; f++) { 1831 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1832 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1833 } 1834 1835 spin_lock_irqsave(&bp->lock, flags); 1836 1837 /* This is a hard error, log it. */ 1838 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1839 bp->tx_ring_size) < desc_cnt) { 1840 netif_stop_subqueue(dev, queue_index); 1841 spin_unlock_irqrestore(&bp->lock, flags); 1842 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1843 queue->tx_head, queue->tx_tail); 1844 return NETDEV_TX_BUSY; 1845 } 1846 1847 /* Map socket buffer for DMA transfer */ 1848 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 1849 dev_kfree_skb_any(skb); 1850 goto unlock; 1851 } 1852 1853 /* Make newly initialized descriptor visible to hardware */ 1854 wmb(); 1855 skb_tx_timestamp(skb); 1856 1857 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1858 1859 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 1860 netif_stop_subqueue(dev, queue_index); 1861 1862 unlock: 1863 spin_unlock_irqrestore(&bp->lock, flags); 1864 1865 return ret; 1866 } 1867 1868 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 1869 { 1870 if (!macb_is_gem(bp)) { 1871 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 1872 } else { 1873 bp->rx_buffer_size = size; 1874 1875 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 1876 netdev_dbg(bp->dev, 1877 "RX buffer must be multiple of %d bytes, expanding\n", 1878 RX_BUFFER_MULTIPLE); 1879 bp->rx_buffer_size = 1880 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 1881 } 1882 } 1883 1884 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 1885 bp->dev->mtu, bp->rx_buffer_size); 1886 } 1887 1888 static void gem_free_rx_buffers(struct macb *bp) 1889 { 1890 struct sk_buff *skb; 1891 struct macb_dma_desc *desc; 1892 struct macb_queue *queue; 1893 dma_addr_t addr; 1894 unsigned int q; 1895 int i; 1896 1897 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1898 if (!queue->rx_skbuff) 1899 continue; 1900 1901 for (i = 0; i < bp->rx_ring_size; i++) { 1902 skb = queue->rx_skbuff[i]; 1903 1904 if (!skb) 1905 continue; 1906 1907 desc = macb_rx_desc(queue, i); 1908 addr = macb_get_addr(bp, desc); 1909 1910 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1911 DMA_FROM_DEVICE); 1912 dev_kfree_skb_any(skb); 1913 skb = NULL; 1914 } 1915 1916 kfree(queue->rx_skbuff); 1917 queue->rx_skbuff = NULL; 1918 } 1919 } 1920 1921 static void macb_free_rx_buffers(struct macb *bp) 1922 { 1923 struct macb_queue *queue = &bp->queues[0]; 1924 1925 if (queue->rx_buffers) { 1926 dma_free_coherent(&bp->pdev->dev, 1927 bp->rx_ring_size * bp->rx_buffer_size, 1928 queue->rx_buffers, queue->rx_buffers_dma); 1929 queue->rx_buffers = NULL; 1930 } 1931 } 1932 1933 static void macb_free_consistent(struct macb *bp) 1934 { 1935 struct macb_queue *queue; 1936 unsigned int q; 1937 int size; 1938 1939 bp->macbgem_ops.mog_free_rx_buffers(bp); 1940 1941 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1942 kfree(queue->tx_skb); 1943 queue->tx_skb = NULL; 1944 if (queue->tx_ring) { 1945 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 1946 dma_free_coherent(&bp->pdev->dev, size, 1947 queue->tx_ring, queue->tx_ring_dma); 1948 queue->tx_ring = NULL; 1949 } 1950 if (queue->rx_ring) { 1951 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 1952 dma_free_coherent(&bp->pdev->dev, size, 1953 queue->rx_ring, queue->rx_ring_dma); 1954 queue->rx_ring = NULL; 1955 } 1956 } 1957 } 1958 1959 static int gem_alloc_rx_buffers(struct macb *bp) 1960 { 1961 struct macb_queue *queue; 1962 unsigned int q; 1963 int size; 1964 1965 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1966 size = bp->rx_ring_size * sizeof(struct sk_buff *); 1967 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); 1968 if (!queue->rx_skbuff) 1969 return -ENOMEM; 1970 else 1971 netdev_dbg(bp->dev, 1972 "Allocated %d RX struct sk_buff entries at %p\n", 1973 bp->rx_ring_size, queue->rx_skbuff); 1974 } 1975 return 0; 1976 } 1977 1978 static int macb_alloc_rx_buffers(struct macb *bp) 1979 { 1980 struct macb_queue *queue = &bp->queues[0]; 1981 int size; 1982 1983 size = bp->rx_ring_size * bp->rx_buffer_size; 1984 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 1985 &queue->rx_buffers_dma, GFP_KERNEL); 1986 if (!queue->rx_buffers) 1987 return -ENOMEM; 1988 1989 netdev_dbg(bp->dev, 1990 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1991 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 1992 return 0; 1993 } 1994 1995 static int macb_alloc_consistent(struct macb *bp) 1996 { 1997 struct macb_queue *queue; 1998 unsigned int q; 1999 int size; 2000 2001 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2002 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2003 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2004 &queue->tx_ring_dma, 2005 GFP_KERNEL); 2006 if (!queue->tx_ring) 2007 goto out_err; 2008 netdev_dbg(bp->dev, 2009 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 2010 q, size, (unsigned long)queue->tx_ring_dma, 2011 queue->tx_ring); 2012 2013 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 2014 queue->tx_skb = kmalloc(size, GFP_KERNEL); 2015 if (!queue->tx_skb) 2016 goto out_err; 2017 2018 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2019 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2020 &queue->rx_ring_dma, GFP_KERNEL); 2021 if (!queue->rx_ring) 2022 goto out_err; 2023 netdev_dbg(bp->dev, 2024 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 2025 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 2026 } 2027 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 2028 goto out_err; 2029 2030 return 0; 2031 2032 out_err: 2033 macb_free_consistent(bp); 2034 return -ENOMEM; 2035 } 2036 2037 static void gem_init_rings(struct macb *bp) 2038 { 2039 struct macb_queue *queue; 2040 struct macb_dma_desc *desc = NULL; 2041 unsigned int q; 2042 int i; 2043 2044 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2045 for (i = 0; i < bp->tx_ring_size; i++) { 2046 desc = macb_tx_desc(queue, i); 2047 macb_set_addr(bp, desc, 0); 2048 desc->ctrl = MACB_BIT(TX_USED); 2049 } 2050 desc->ctrl |= MACB_BIT(TX_WRAP); 2051 queue->tx_head = 0; 2052 queue->tx_tail = 0; 2053 2054 queue->rx_tail = 0; 2055 queue->rx_prepared_head = 0; 2056 2057 gem_rx_refill(queue); 2058 } 2059 2060 } 2061 2062 static void macb_init_rings(struct macb *bp) 2063 { 2064 int i; 2065 struct macb_dma_desc *desc = NULL; 2066 2067 macb_init_rx_ring(&bp->queues[0]); 2068 2069 for (i = 0; i < bp->tx_ring_size; i++) { 2070 desc = macb_tx_desc(&bp->queues[0], i); 2071 macb_set_addr(bp, desc, 0); 2072 desc->ctrl = MACB_BIT(TX_USED); 2073 } 2074 bp->queues[0].tx_head = 0; 2075 bp->queues[0].tx_tail = 0; 2076 desc->ctrl |= MACB_BIT(TX_WRAP); 2077 } 2078 2079 static void macb_reset_hw(struct macb *bp) 2080 { 2081 struct macb_queue *queue; 2082 unsigned int q; 2083 u32 ctrl = macb_readl(bp, NCR); 2084 2085 /* Disable RX and TX (XXX: Should we halt the transmission 2086 * more gracefully?) 2087 */ 2088 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2089 2090 /* Clear the stats registers (XXX: Update stats first?) */ 2091 ctrl |= MACB_BIT(CLRSTAT); 2092 2093 macb_writel(bp, NCR, ctrl); 2094 2095 /* Clear all status flags */ 2096 macb_writel(bp, TSR, -1); 2097 macb_writel(bp, RSR, -1); 2098 2099 /* Disable all interrupts */ 2100 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2101 queue_writel(queue, IDR, -1); 2102 queue_readl(queue, ISR); 2103 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2104 queue_writel(queue, ISR, -1); 2105 } 2106 } 2107 2108 static u32 gem_mdc_clk_div(struct macb *bp) 2109 { 2110 u32 config; 2111 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2112 2113 if (pclk_hz <= 20000000) 2114 config = GEM_BF(CLK, GEM_CLK_DIV8); 2115 else if (pclk_hz <= 40000000) 2116 config = GEM_BF(CLK, GEM_CLK_DIV16); 2117 else if (pclk_hz <= 80000000) 2118 config = GEM_BF(CLK, GEM_CLK_DIV32); 2119 else if (pclk_hz <= 120000000) 2120 config = GEM_BF(CLK, GEM_CLK_DIV48); 2121 else if (pclk_hz <= 160000000) 2122 config = GEM_BF(CLK, GEM_CLK_DIV64); 2123 else 2124 config = GEM_BF(CLK, GEM_CLK_DIV96); 2125 2126 return config; 2127 } 2128 2129 static u32 macb_mdc_clk_div(struct macb *bp) 2130 { 2131 u32 config; 2132 unsigned long pclk_hz; 2133 2134 if (macb_is_gem(bp)) 2135 return gem_mdc_clk_div(bp); 2136 2137 pclk_hz = clk_get_rate(bp->pclk); 2138 if (pclk_hz <= 20000000) 2139 config = MACB_BF(CLK, MACB_CLK_DIV8); 2140 else if (pclk_hz <= 40000000) 2141 config = MACB_BF(CLK, MACB_CLK_DIV16); 2142 else if (pclk_hz <= 80000000) 2143 config = MACB_BF(CLK, MACB_CLK_DIV32); 2144 else 2145 config = MACB_BF(CLK, MACB_CLK_DIV64); 2146 2147 return config; 2148 } 2149 2150 /* Get the DMA bus width field of the network configuration register that we 2151 * should program. We find the width from decoding the design configuration 2152 * register to find the maximum supported data bus width. 2153 */ 2154 static u32 macb_dbw(struct macb *bp) 2155 { 2156 if (!macb_is_gem(bp)) 2157 return 0; 2158 2159 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2160 case 4: 2161 return GEM_BF(DBW, GEM_DBW128); 2162 case 2: 2163 return GEM_BF(DBW, GEM_DBW64); 2164 case 1: 2165 default: 2166 return GEM_BF(DBW, GEM_DBW32); 2167 } 2168 } 2169 2170 /* Configure the receive DMA engine 2171 * - use the correct receive buffer size 2172 * - set best burst length for DMA operations 2173 * (if not supported by FIFO, it will fallback to default) 2174 * - set both rx/tx packet buffers to full memory size 2175 * These are configurable parameters for GEM. 2176 */ 2177 static void macb_configure_dma(struct macb *bp) 2178 { 2179 struct macb_queue *queue; 2180 u32 buffer_size; 2181 unsigned int q; 2182 u32 dmacfg; 2183 2184 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2185 if (macb_is_gem(bp)) { 2186 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2187 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2188 if (q) 2189 queue_writel(queue, RBQS, buffer_size); 2190 else 2191 dmacfg |= GEM_BF(RXBS, buffer_size); 2192 } 2193 if (bp->dma_burst_length) 2194 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2195 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2196 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2197 2198 if (bp->native_io) 2199 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2200 else 2201 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2202 2203 if (bp->dev->features & NETIF_F_HW_CSUM) 2204 dmacfg |= GEM_BIT(TXCOEN); 2205 else 2206 dmacfg &= ~GEM_BIT(TXCOEN); 2207 2208 dmacfg &= ~GEM_BIT(ADDR64); 2209 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2210 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2211 dmacfg |= GEM_BIT(ADDR64); 2212 #endif 2213 #ifdef CONFIG_MACB_USE_HWSTAMP 2214 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2215 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2216 #endif 2217 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2218 dmacfg); 2219 gem_writel(bp, DMACFG, dmacfg); 2220 } 2221 } 2222 2223 static void macb_init_hw(struct macb *bp) 2224 { 2225 struct macb_queue *queue; 2226 unsigned int q; 2227 2228 u32 config; 2229 2230 macb_reset_hw(bp); 2231 macb_set_hwaddr(bp); 2232 2233 config = macb_mdc_clk_div(bp); 2234 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 2235 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 2236 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2237 config |= MACB_BIT(PAE); /* PAuse Enable */ 2238 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2239 if (bp->caps & MACB_CAPS_JUMBO) 2240 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2241 else 2242 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2243 if (bp->dev->flags & IFF_PROMISC) 2244 config |= MACB_BIT(CAF); /* Copy All Frames */ 2245 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2246 config |= GEM_BIT(RXCOEN); 2247 if (!(bp->dev->flags & IFF_BROADCAST)) 2248 config |= MACB_BIT(NBC); /* No BroadCast */ 2249 config |= macb_dbw(bp); 2250 macb_writel(bp, NCFGR, config); 2251 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2252 gem_writel(bp, JML, bp->jumbo_max_len); 2253 bp->speed = SPEED_10; 2254 bp->duplex = DUPLEX_HALF; 2255 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2256 if (bp->caps & MACB_CAPS_JUMBO) 2257 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2258 2259 macb_configure_dma(bp); 2260 2261 /* Initialize TX and RX buffers */ 2262 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2263 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); 2264 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2265 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2266 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); 2267 #endif 2268 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 2269 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2270 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2271 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 2272 #endif 2273 2274 /* Enable interrupts */ 2275 queue_writel(queue, IER, 2276 bp->rx_intr_mask | 2277 MACB_TX_INT_FLAGS | 2278 MACB_BIT(HRESP)); 2279 } 2280 2281 /* Enable TX and RX */ 2282 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); 2283 } 2284 2285 /* The hash address register is 64 bits long and takes up two 2286 * locations in the memory map. The least significant bits are stored 2287 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2288 * 2289 * The unicast hash enable and the multicast hash enable bits in the 2290 * network configuration register enable the reception of hash matched 2291 * frames. The destination address is reduced to a 6 bit index into 2292 * the 64 bit hash register using the following hash function. The 2293 * hash function is an exclusive or of every sixth bit of the 2294 * destination address. 2295 * 2296 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2297 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2298 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2299 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2300 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2301 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2302 * 2303 * da[0] represents the least significant bit of the first byte 2304 * received, that is, the multicast/unicast indicator, and da[47] 2305 * represents the most significant bit of the last byte received. If 2306 * the hash index, hi[n], points to a bit that is set in the hash 2307 * register then the frame will be matched according to whether the 2308 * frame is multicast or unicast. A multicast match will be signalled 2309 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2310 * index points to a bit set in the hash register. A unicast match 2311 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2312 * and the hash index points to a bit set in the hash register. To 2313 * receive all multicast frames, the hash register should be set with 2314 * all ones and the multicast hash enable bit should be set in the 2315 * network configuration register. 2316 */ 2317 2318 static inline int hash_bit_value(int bitnr, __u8 *addr) 2319 { 2320 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2321 return 1; 2322 return 0; 2323 } 2324 2325 /* Return the hash index value for the specified address. */ 2326 static int hash_get_index(__u8 *addr) 2327 { 2328 int i, j, bitval; 2329 int hash_index = 0; 2330 2331 for (j = 0; j < 6; j++) { 2332 for (i = 0, bitval = 0; i < 8; i++) 2333 bitval ^= hash_bit_value(i * 6 + j, addr); 2334 2335 hash_index |= (bitval << j); 2336 } 2337 2338 return hash_index; 2339 } 2340 2341 /* Add multicast addresses to the internal multicast-hash table. */ 2342 static void macb_sethashtable(struct net_device *dev) 2343 { 2344 struct netdev_hw_addr *ha; 2345 unsigned long mc_filter[2]; 2346 unsigned int bitnr; 2347 struct macb *bp = netdev_priv(dev); 2348 2349 mc_filter[0] = 0; 2350 mc_filter[1] = 0; 2351 2352 netdev_for_each_mc_addr(ha, dev) { 2353 bitnr = hash_get_index(ha->addr); 2354 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2355 } 2356 2357 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2358 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2359 } 2360 2361 /* Enable/Disable promiscuous and multicast modes. */ 2362 static void macb_set_rx_mode(struct net_device *dev) 2363 { 2364 unsigned long cfg; 2365 struct macb *bp = netdev_priv(dev); 2366 2367 cfg = macb_readl(bp, NCFGR); 2368 2369 if (dev->flags & IFF_PROMISC) { 2370 /* Enable promiscuous mode */ 2371 cfg |= MACB_BIT(CAF); 2372 2373 /* Disable RX checksum offload */ 2374 if (macb_is_gem(bp)) 2375 cfg &= ~GEM_BIT(RXCOEN); 2376 } else { 2377 /* Disable promiscuous mode */ 2378 cfg &= ~MACB_BIT(CAF); 2379 2380 /* Enable RX checksum offload only if requested */ 2381 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2382 cfg |= GEM_BIT(RXCOEN); 2383 } 2384 2385 if (dev->flags & IFF_ALLMULTI) { 2386 /* Enable all multicast mode */ 2387 macb_or_gem_writel(bp, HRB, -1); 2388 macb_or_gem_writel(bp, HRT, -1); 2389 cfg |= MACB_BIT(NCFGR_MTI); 2390 } else if (!netdev_mc_empty(dev)) { 2391 /* Enable specific multicasts */ 2392 macb_sethashtable(dev); 2393 cfg |= MACB_BIT(NCFGR_MTI); 2394 } else if (dev->flags & (~IFF_ALLMULTI)) { 2395 /* Disable all multicast mode */ 2396 macb_or_gem_writel(bp, HRB, 0); 2397 macb_or_gem_writel(bp, HRT, 0); 2398 cfg &= ~MACB_BIT(NCFGR_MTI); 2399 } 2400 2401 macb_writel(bp, NCFGR, cfg); 2402 } 2403 2404 static int macb_open(struct net_device *dev) 2405 { 2406 struct macb *bp = netdev_priv(dev); 2407 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2408 struct macb_queue *queue; 2409 unsigned int q; 2410 int err; 2411 2412 netdev_dbg(bp->dev, "open\n"); 2413 2414 err = pm_runtime_get_sync(&bp->pdev->dev); 2415 if (err < 0) 2416 goto pm_exit; 2417 2418 /* carrier starts down */ 2419 netif_carrier_off(dev); 2420 2421 /* if the phy is not yet register, retry later*/ 2422 if (!dev->phydev) { 2423 err = -EAGAIN; 2424 goto pm_exit; 2425 } 2426 2427 /* RX buffers initialization */ 2428 macb_init_rx_buffer_size(bp, bufsz); 2429 2430 err = macb_alloc_consistent(bp); 2431 if (err) { 2432 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2433 err); 2434 goto pm_exit; 2435 } 2436 2437 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2438 napi_enable(&queue->napi); 2439 2440 bp->macbgem_ops.mog_init_rings(bp); 2441 macb_init_hw(bp); 2442 2443 /* schedule a link state check */ 2444 phy_start(dev->phydev); 2445 2446 netif_tx_start_all_queues(dev); 2447 2448 if (bp->ptp_info) 2449 bp->ptp_info->ptp_init(dev); 2450 2451 pm_exit: 2452 if (err) { 2453 pm_runtime_put_sync(&bp->pdev->dev); 2454 return err; 2455 } 2456 return 0; 2457 } 2458 2459 static int macb_close(struct net_device *dev) 2460 { 2461 struct macb *bp = netdev_priv(dev); 2462 struct macb_queue *queue; 2463 unsigned long flags; 2464 unsigned int q; 2465 2466 netif_tx_stop_all_queues(dev); 2467 2468 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2469 napi_disable(&queue->napi); 2470 2471 if (dev->phydev) 2472 phy_stop(dev->phydev); 2473 2474 spin_lock_irqsave(&bp->lock, flags); 2475 macb_reset_hw(bp); 2476 netif_carrier_off(dev); 2477 spin_unlock_irqrestore(&bp->lock, flags); 2478 2479 macb_free_consistent(bp); 2480 2481 if (bp->ptp_info) 2482 bp->ptp_info->ptp_remove(dev); 2483 2484 pm_runtime_put(&bp->pdev->dev); 2485 2486 return 0; 2487 } 2488 2489 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2490 { 2491 if (netif_running(dev)) 2492 return -EBUSY; 2493 2494 dev->mtu = new_mtu; 2495 2496 return 0; 2497 } 2498 2499 static void gem_update_stats(struct macb *bp) 2500 { 2501 struct macb_queue *queue; 2502 unsigned int i, q, idx; 2503 unsigned long *stat; 2504 2505 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2506 2507 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 2508 u32 offset = gem_statistics[i].offset; 2509 u64 val = bp->macb_reg_readl(bp, offset); 2510 2511 bp->ethtool_stats[i] += val; 2512 *p += val; 2513 2514 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2515 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2516 val = bp->macb_reg_readl(bp, offset + 4); 2517 bp->ethtool_stats[i] += ((u64)val) << 32; 2518 *(++p) += val; 2519 } 2520 } 2521 2522 idx = GEM_STATS_LEN; 2523 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2524 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2525 bp->ethtool_stats[idx++] = *stat; 2526 } 2527 2528 static struct net_device_stats *gem_get_stats(struct macb *bp) 2529 { 2530 struct gem_stats *hwstat = &bp->hw_stats.gem; 2531 struct net_device_stats *nstat = &bp->dev->stats; 2532 2533 gem_update_stats(bp); 2534 2535 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2536 hwstat->rx_alignment_errors + 2537 hwstat->rx_resource_errors + 2538 hwstat->rx_overruns + 2539 hwstat->rx_oversize_frames + 2540 hwstat->rx_jabbers + 2541 hwstat->rx_undersized_frames + 2542 hwstat->rx_length_field_frame_errors); 2543 nstat->tx_errors = (hwstat->tx_late_collisions + 2544 hwstat->tx_excessive_collisions + 2545 hwstat->tx_underrun + 2546 hwstat->tx_carrier_sense_errors); 2547 nstat->multicast = hwstat->rx_multicast_frames; 2548 nstat->collisions = (hwstat->tx_single_collision_frames + 2549 hwstat->tx_multiple_collision_frames + 2550 hwstat->tx_excessive_collisions); 2551 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2552 hwstat->rx_jabbers + 2553 hwstat->rx_undersized_frames + 2554 hwstat->rx_length_field_frame_errors); 2555 nstat->rx_over_errors = hwstat->rx_resource_errors; 2556 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2557 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2558 nstat->rx_fifo_errors = hwstat->rx_overruns; 2559 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2560 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2561 nstat->tx_fifo_errors = hwstat->tx_underrun; 2562 2563 return nstat; 2564 } 2565 2566 static void gem_get_ethtool_stats(struct net_device *dev, 2567 struct ethtool_stats *stats, u64 *data) 2568 { 2569 struct macb *bp; 2570 2571 bp = netdev_priv(dev); 2572 gem_update_stats(bp); 2573 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2574 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2575 } 2576 2577 static int gem_get_sset_count(struct net_device *dev, int sset) 2578 { 2579 struct macb *bp = netdev_priv(dev); 2580 2581 switch (sset) { 2582 case ETH_SS_STATS: 2583 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2584 default: 2585 return -EOPNOTSUPP; 2586 } 2587 } 2588 2589 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2590 { 2591 char stat_string[ETH_GSTRING_LEN]; 2592 struct macb *bp = netdev_priv(dev); 2593 struct macb_queue *queue; 2594 unsigned int i; 2595 unsigned int q; 2596 2597 switch (sset) { 2598 case ETH_SS_STATS: 2599 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2600 memcpy(p, gem_statistics[i].stat_string, 2601 ETH_GSTRING_LEN); 2602 2603 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2604 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2605 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2606 q, queue_statistics[i].stat_string); 2607 memcpy(p, stat_string, ETH_GSTRING_LEN); 2608 } 2609 } 2610 break; 2611 } 2612 } 2613 2614 static struct net_device_stats *macb_get_stats(struct net_device *dev) 2615 { 2616 struct macb *bp = netdev_priv(dev); 2617 struct net_device_stats *nstat = &bp->dev->stats; 2618 struct macb_stats *hwstat = &bp->hw_stats.macb; 2619 2620 if (macb_is_gem(bp)) 2621 return gem_get_stats(bp); 2622 2623 /* read stats from hardware */ 2624 macb_update_stats(bp); 2625 2626 /* Convert HW stats into netdevice stats */ 2627 nstat->rx_errors = (hwstat->rx_fcs_errors + 2628 hwstat->rx_align_errors + 2629 hwstat->rx_resource_errors + 2630 hwstat->rx_overruns + 2631 hwstat->rx_oversize_pkts + 2632 hwstat->rx_jabbers + 2633 hwstat->rx_undersize_pkts + 2634 hwstat->rx_length_mismatch); 2635 nstat->tx_errors = (hwstat->tx_late_cols + 2636 hwstat->tx_excessive_cols + 2637 hwstat->tx_underruns + 2638 hwstat->tx_carrier_errors + 2639 hwstat->sqe_test_errors); 2640 nstat->collisions = (hwstat->tx_single_cols + 2641 hwstat->tx_multiple_cols + 2642 hwstat->tx_excessive_cols); 2643 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2644 hwstat->rx_jabbers + 2645 hwstat->rx_undersize_pkts + 2646 hwstat->rx_length_mismatch); 2647 nstat->rx_over_errors = hwstat->rx_resource_errors + 2648 hwstat->rx_overruns; 2649 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2650 nstat->rx_frame_errors = hwstat->rx_align_errors; 2651 nstat->rx_fifo_errors = hwstat->rx_overruns; 2652 /* XXX: What does "missed" mean? */ 2653 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2654 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2655 nstat->tx_fifo_errors = hwstat->tx_underruns; 2656 /* Don't know about heartbeat or window errors... */ 2657 2658 return nstat; 2659 } 2660 2661 static int macb_get_regs_len(struct net_device *netdev) 2662 { 2663 return MACB_GREGS_NBR * sizeof(u32); 2664 } 2665 2666 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2667 void *p) 2668 { 2669 struct macb *bp = netdev_priv(dev); 2670 unsigned int tail, head; 2671 u32 *regs_buff = p; 2672 2673 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2674 | MACB_GREGS_VERSION; 2675 2676 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2677 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2678 2679 regs_buff[0] = macb_readl(bp, NCR); 2680 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2681 regs_buff[2] = macb_readl(bp, NSR); 2682 regs_buff[3] = macb_readl(bp, TSR); 2683 regs_buff[4] = macb_readl(bp, RBQP); 2684 regs_buff[5] = macb_readl(bp, TBQP); 2685 regs_buff[6] = macb_readl(bp, RSR); 2686 regs_buff[7] = macb_readl(bp, IMR); 2687 2688 regs_buff[8] = tail; 2689 regs_buff[9] = head; 2690 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2691 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2692 2693 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2694 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2695 if (macb_is_gem(bp)) 2696 regs_buff[13] = gem_readl(bp, DMACFG); 2697 } 2698 2699 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2700 { 2701 struct macb *bp = netdev_priv(netdev); 2702 2703 wol->supported = 0; 2704 wol->wolopts = 0; 2705 2706 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2707 wol->supported = WAKE_MAGIC; 2708 2709 if (bp->wol & MACB_WOL_ENABLED) 2710 wol->wolopts |= WAKE_MAGIC; 2711 } 2712 } 2713 2714 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2715 { 2716 struct macb *bp = netdev_priv(netdev); 2717 2718 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2719 (wol->wolopts & ~WAKE_MAGIC)) 2720 return -EOPNOTSUPP; 2721 2722 if (wol->wolopts & WAKE_MAGIC) 2723 bp->wol |= MACB_WOL_ENABLED; 2724 else 2725 bp->wol &= ~MACB_WOL_ENABLED; 2726 2727 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2728 2729 return 0; 2730 } 2731 2732 static void macb_get_ringparam(struct net_device *netdev, 2733 struct ethtool_ringparam *ring) 2734 { 2735 struct macb *bp = netdev_priv(netdev); 2736 2737 ring->rx_max_pending = MAX_RX_RING_SIZE; 2738 ring->tx_max_pending = MAX_TX_RING_SIZE; 2739 2740 ring->rx_pending = bp->rx_ring_size; 2741 ring->tx_pending = bp->tx_ring_size; 2742 } 2743 2744 static int macb_set_ringparam(struct net_device *netdev, 2745 struct ethtool_ringparam *ring) 2746 { 2747 struct macb *bp = netdev_priv(netdev); 2748 u32 new_rx_size, new_tx_size; 2749 unsigned int reset = 0; 2750 2751 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2752 return -EINVAL; 2753 2754 new_rx_size = clamp_t(u32, ring->rx_pending, 2755 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2756 new_rx_size = roundup_pow_of_two(new_rx_size); 2757 2758 new_tx_size = clamp_t(u32, ring->tx_pending, 2759 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2760 new_tx_size = roundup_pow_of_two(new_tx_size); 2761 2762 if ((new_tx_size == bp->tx_ring_size) && 2763 (new_rx_size == bp->rx_ring_size)) { 2764 /* nothing to do */ 2765 return 0; 2766 } 2767 2768 if (netif_running(bp->dev)) { 2769 reset = 1; 2770 macb_close(bp->dev); 2771 } 2772 2773 bp->rx_ring_size = new_rx_size; 2774 bp->tx_ring_size = new_tx_size; 2775 2776 if (reset) 2777 macb_open(bp->dev); 2778 2779 return 0; 2780 } 2781 2782 #ifdef CONFIG_MACB_USE_HWSTAMP 2783 static unsigned int gem_get_tsu_rate(struct macb *bp) 2784 { 2785 struct clk *tsu_clk; 2786 unsigned int tsu_rate; 2787 2788 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2789 if (!IS_ERR(tsu_clk)) 2790 tsu_rate = clk_get_rate(tsu_clk); 2791 /* try pclk instead */ 2792 else if (!IS_ERR(bp->pclk)) { 2793 tsu_clk = bp->pclk; 2794 tsu_rate = clk_get_rate(tsu_clk); 2795 } else 2796 return -ENOTSUPP; 2797 return tsu_rate; 2798 } 2799 2800 static s32 gem_get_ptp_max_adj(void) 2801 { 2802 return 64000000; 2803 } 2804 2805 static int gem_get_ts_info(struct net_device *dev, 2806 struct ethtool_ts_info *info) 2807 { 2808 struct macb *bp = netdev_priv(dev); 2809 2810 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2811 ethtool_op_get_ts_info(dev, info); 2812 return 0; 2813 } 2814 2815 info->so_timestamping = 2816 SOF_TIMESTAMPING_TX_SOFTWARE | 2817 SOF_TIMESTAMPING_RX_SOFTWARE | 2818 SOF_TIMESTAMPING_SOFTWARE | 2819 SOF_TIMESTAMPING_TX_HARDWARE | 2820 SOF_TIMESTAMPING_RX_HARDWARE | 2821 SOF_TIMESTAMPING_RAW_HARDWARE; 2822 info->tx_types = 2823 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2824 (1 << HWTSTAMP_TX_OFF) | 2825 (1 << HWTSTAMP_TX_ON); 2826 info->rx_filters = 2827 (1 << HWTSTAMP_FILTER_NONE) | 2828 (1 << HWTSTAMP_FILTER_ALL); 2829 2830 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; 2831 2832 return 0; 2833 } 2834 2835 static struct macb_ptp_info gem_ptp_info = { 2836 .ptp_init = gem_ptp_init, 2837 .ptp_remove = gem_ptp_remove, 2838 .get_ptp_max_adj = gem_get_ptp_max_adj, 2839 .get_tsu_rate = gem_get_tsu_rate, 2840 .get_ts_info = gem_get_ts_info, 2841 .get_hwtst = gem_get_hwtst, 2842 .set_hwtst = gem_set_hwtst, 2843 }; 2844 #endif 2845 2846 static int macb_get_ts_info(struct net_device *netdev, 2847 struct ethtool_ts_info *info) 2848 { 2849 struct macb *bp = netdev_priv(netdev); 2850 2851 if (bp->ptp_info) 2852 return bp->ptp_info->get_ts_info(netdev, info); 2853 2854 return ethtool_op_get_ts_info(netdev, info); 2855 } 2856 2857 static void gem_enable_flow_filters(struct macb *bp, bool enable) 2858 { 2859 struct net_device *netdev = bp->dev; 2860 struct ethtool_rx_fs_item *item; 2861 u32 t2_scr; 2862 int num_t2_scr; 2863 2864 if (!(netdev->features & NETIF_F_NTUPLE)) 2865 return; 2866 2867 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 2868 2869 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 2870 struct ethtool_rx_flow_spec *fs = &item->fs; 2871 struct ethtool_tcpip4_spec *tp4sp_m; 2872 2873 if (fs->location >= num_t2_scr) 2874 continue; 2875 2876 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 2877 2878 /* enable/disable screener regs for the flow entry */ 2879 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 2880 2881 /* only enable fields with no masking */ 2882 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2883 2884 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 2885 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 2886 else 2887 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 2888 2889 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 2890 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 2891 else 2892 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 2893 2894 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 2895 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 2896 else 2897 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 2898 2899 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 2900 } 2901 } 2902 2903 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 2904 { 2905 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 2906 uint16_t index = fs->location; 2907 u32 w0, w1, t2_scr; 2908 bool cmp_a = false; 2909 bool cmp_b = false; 2910 bool cmp_c = false; 2911 2912 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 2913 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 2914 2915 /* ignore field if any masking set */ 2916 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 2917 /* 1st compare reg - IP source address */ 2918 w0 = 0; 2919 w1 = 0; 2920 w0 = tp4sp_v->ip4src; 2921 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2922 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2923 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 2924 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 2925 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 2926 cmp_a = true; 2927 } 2928 2929 /* ignore field if any masking set */ 2930 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 2931 /* 2nd compare reg - IP destination address */ 2932 w0 = 0; 2933 w1 = 0; 2934 w0 = tp4sp_v->ip4dst; 2935 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2936 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 2937 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 2938 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 2939 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 2940 cmp_b = true; 2941 } 2942 2943 /* ignore both port fields if masking set in both */ 2944 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 2945 /* 3rd compare reg - source port, destination port */ 2946 w0 = 0; 2947 w1 = 0; 2948 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 2949 if (tp4sp_m->psrc == tp4sp_m->pdst) { 2950 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 2951 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2952 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 2953 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2954 } else { 2955 /* only one port definition */ 2956 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 2957 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 2958 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 2959 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 2960 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 2961 } else { /* dst port */ 2962 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 2963 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 2964 } 2965 } 2966 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 2967 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 2968 cmp_c = true; 2969 } 2970 2971 t2_scr = 0; 2972 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 2973 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 2974 if (cmp_a) 2975 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 2976 if (cmp_b) 2977 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 2978 if (cmp_c) 2979 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 2980 gem_writel_n(bp, SCRT2, index, t2_scr); 2981 } 2982 2983 static int gem_add_flow_filter(struct net_device *netdev, 2984 struct ethtool_rxnfc *cmd) 2985 { 2986 struct macb *bp = netdev_priv(netdev); 2987 struct ethtool_rx_flow_spec *fs = &cmd->fs; 2988 struct ethtool_rx_fs_item *item, *newfs; 2989 unsigned long flags; 2990 int ret = -EINVAL; 2991 bool added = false; 2992 2993 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 2994 if (newfs == NULL) 2995 return -ENOMEM; 2996 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 2997 2998 netdev_dbg(netdev, 2999 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3000 fs->flow_type, (int)fs->ring_cookie, fs->location, 3001 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3002 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3003 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 3004 3005 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3006 3007 /* find correct place to add in list */ 3008 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3009 if (item->fs.location > newfs->fs.location) { 3010 list_add_tail(&newfs->list, &item->list); 3011 added = true; 3012 break; 3013 } else if (item->fs.location == fs->location) { 3014 netdev_err(netdev, "Rule not added: location %d not free!\n", 3015 fs->location); 3016 ret = -EBUSY; 3017 goto err; 3018 } 3019 } 3020 if (!added) 3021 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 3022 3023 gem_prog_cmp_regs(bp, fs); 3024 bp->rx_fs_list.count++; 3025 /* enable filtering if NTUPLE on */ 3026 gem_enable_flow_filters(bp, 1); 3027 3028 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3029 return 0; 3030 3031 err: 3032 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3033 kfree(newfs); 3034 return ret; 3035 } 3036 3037 static int gem_del_flow_filter(struct net_device *netdev, 3038 struct ethtool_rxnfc *cmd) 3039 { 3040 struct macb *bp = netdev_priv(netdev); 3041 struct ethtool_rx_fs_item *item; 3042 struct ethtool_rx_flow_spec *fs; 3043 unsigned long flags; 3044 3045 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3046 3047 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3048 if (item->fs.location == cmd->fs.location) { 3049 /* disable screener regs for the flow entry */ 3050 fs = &(item->fs); 3051 netdev_dbg(netdev, 3052 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3053 fs->flow_type, (int)fs->ring_cookie, fs->location, 3054 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3055 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3056 htons(fs->h_u.tcp_ip4_spec.psrc), 3057 htons(fs->h_u.tcp_ip4_spec.pdst)); 3058 3059 gem_writel_n(bp, SCRT2, fs->location, 0); 3060 3061 list_del(&item->list); 3062 bp->rx_fs_list.count--; 3063 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3064 kfree(item); 3065 return 0; 3066 } 3067 } 3068 3069 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3070 return -EINVAL; 3071 } 3072 3073 static int gem_get_flow_entry(struct net_device *netdev, 3074 struct ethtool_rxnfc *cmd) 3075 { 3076 struct macb *bp = netdev_priv(netdev); 3077 struct ethtool_rx_fs_item *item; 3078 3079 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3080 if (item->fs.location == cmd->fs.location) { 3081 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3082 return 0; 3083 } 3084 } 3085 return -EINVAL; 3086 } 3087 3088 static int gem_get_all_flow_entries(struct net_device *netdev, 3089 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3090 { 3091 struct macb *bp = netdev_priv(netdev); 3092 struct ethtool_rx_fs_item *item; 3093 uint32_t cnt = 0; 3094 3095 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3096 if (cnt == cmd->rule_cnt) 3097 return -EMSGSIZE; 3098 rule_locs[cnt] = item->fs.location; 3099 cnt++; 3100 } 3101 cmd->data = bp->max_tuples; 3102 cmd->rule_cnt = cnt; 3103 3104 return 0; 3105 } 3106 3107 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3108 u32 *rule_locs) 3109 { 3110 struct macb *bp = netdev_priv(netdev); 3111 int ret = 0; 3112 3113 switch (cmd->cmd) { 3114 case ETHTOOL_GRXRINGS: 3115 cmd->data = bp->num_queues; 3116 break; 3117 case ETHTOOL_GRXCLSRLCNT: 3118 cmd->rule_cnt = bp->rx_fs_list.count; 3119 break; 3120 case ETHTOOL_GRXCLSRULE: 3121 ret = gem_get_flow_entry(netdev, cmd); 3122 break; 3123 case ETHTOOL_GRXCLSRLALL: 3124 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3125 break; 3126 default: 3127 netdev_err(netdev, 3128 "Command parameter %d is not supported\n", cmd->cmd); 3129 ret = -EOPNOTSUPP; 3130 } 3131 3132 return ret; 3133 } 3134 3135 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3136 { 3137 struct macb *bp = netdev_priv(netdev); 3138 int ret; 3139 3140 switch (cmd->cmd) { 3141 case ETHTOOL_SRXCLSRLINS: 3142 if ((cmd->fs.location >= bp->max_tuples) 3143 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3144 ret = -EINVAL; 3145 break; 3146 } 3147 ret = gem_add_flow_filter(netdev, cmd); 3148 break; 3149 case ETHTOOL_SRXCLSRLDEL: 3150 ret = gem_del_flow_filter(netdev, cmd); 3151 break; 3152 default: 3153 netdev_err(netdev, 3154 "Command parameter %d is not supported\n", cmd->cmd); 3155 ret = -EOPNOTSUPP; 3156 } 3157 3158 return ret; 3159 } 3160 3161 static const struct ethtool_ops macb_ethtool_ops = { 3162 .get_regs_len = macb_get_regs_len, 3163 .get_regs = macb_get_regs, 3164 .get_link = ethtool_op_get_link, 3165 .get_ts_info = ethtool_op_get_ts_info, 3166 .get_wol = macb_get_wol, 3167 .set_wol = macb_set_wol, 3168 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3169 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3170 .get_ringparam = macb_get_ringparam, 3171 .set_ringparam = macb_set_ringparam, 3172 }; 3173 3174 static const struct ethtool_ops gem_ethtool_ops = { 3175 .get_regs_len = macb_get_regs_len, 3176 .get_regs = macb_get_regs, 3177 .get_link = ethtool_op_get_link, 3178 .get_ts_info = macb_get_ts_info, 3179 .get_ethtool_stats = gem_get_ethtool_stats, 3180 .get_strings = gem_get_ethtool_strings, 3181 .get_sset_count = gem_get_sset_count, 3182 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3183 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3184 .get_ringparam = macb_get_ringparam, 3185 .set_ringparam = macb_set_ringparam, 3186 .get_rxnfc = gem_get_rxnfc, 3187 .set_rxnfc = gem_set_rxnfc, 3188 }; 3189 3190 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3191 { 3192 struct phy_device *phydev = dev->phydev; 3193 struct macb *bp = netdev_priv(dev); 3194 3195 if (!netif_running(dev)) 3196 return -EINVAL; 3197 3198 if (!phydev) 3199 return -ENODEV; 3200 3201 if (!bp->ptp_info) 3202 return phy_mii_ioctl(phydev, rq, cmd); 3203 3204 switch (cmd) { 3205 case SIOCSHWTSTAMP: 3206 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3207 case SIOCGHWTSTAMP: 3208 return bp->ptp_info->get_hwtst(dev, rq); 3209 default: 3210 return phy_mii_ioctl(phydev, rq, cmd); 3211 } 3212 } 3213 3214 static inline void macb_set_txcsum_feature(struct macb *bp, 3215 netdev_features_t features) 3216 { 3217 u32 val; 3218 3219 if (!macb_is_gem(bp)) 3220 return; 3221 3222 val = gem_readl(bp, DMACFG); 3223 if (features & NETIF_F_HW_CSUM) 3224 val |= GEM_BIT(TXCOEN); 3225 else 3226 val &= ~GEM_BIT(TXCOEN); 3227 3228 gem_writel(bp, DMACFG, val); 3229 } 3230 3231 static inline void macb_set_rxcsum_feature(struct macb *bp, 3232 netdev_features_t features) 3233 { 3234 struct net_device *netdev = bp->dev; 3235 u32 val; 3236 3237 if (!macb_is_gem(bp)) 3238 return; 3239 3240 val = gem_readl(bp, NCFGR); 3241 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) 3242 val |= GEM_BIT(RXCOEN); 3243 else 3244 val &= ~GEM_BIT(RXCOEN); 3245 3246 gem_writel(bp, NCFGR, val); 3247 } 3248 3249 static inline void macb_set_rxflow_feature(struct macb *bp, 3250 netdev_features_t features) 3251 { 3252 if (!macb_is_gem(bp)) 3253 return; 3254 3255 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); 3256 } 3257 3258 static int macb_set_features(struct net_device *netdev, 3259 netdev_features_t features) 3260 { 3261 struct macb *bp = netdev_priv(netdev); 3262 netdev_features_t changed = features ^ netdev->features; 3263 3264 /* TX checksum offload */ 3265 if (changed & NETIF_F_HW_CSUM) 3266 macb_set_txcsum_feature(bp, features); 3267 3268 /* RX checksum offload */ 3269 if (changed & NETIF_F_RXCSUM) 3270 macb_set_rxcsum_feature(bp, features); 3271 3272 /* RX Flow Filters */ 3273 if (changed & NETIF_F_NTUPLE) 3274 macb_set_rxflow_feature(bp, features); 3275 3276 return 0; 3277 } 3278 3279 static void macb_restore_features(struct macb *bp) 3280 { 3281 struct net_device *netdev = bp->dev; 3282 netdev_features_t features = netdev->features; 3283 3284 /* TX checksum offload */ 3285 macb_set_txcsum_feature(bp, features); 3286 3287 /* RX checksum offload */ 3288 macb_set_rxcsum_feature(bp, features); 3289 3290 /* RX Flow Filters */ 3291 macb_set_rxflow_feature(bp, features); 3292 } 3293 3294 static const struct net_device_ops macb_netdev_ops = { 3295 .ndo_open = macb_open, 3296 .ndo_stop = macb_close, 3297 .ndo_start_xmit = macb_start_xmit, 3298 .ndo_set_rx_mode = macb_set_rx_mode, 3299 .ndo_get_stats = macb_get_stats, 3300 .ndo_do_ioctl = macb_ioctl, 3301 .ndo_validate_addr = eth_validate_addr, 3302 .ndo_change_mtu = macb_change_mtu, 3303 .ndo_set_mac_address = eth_mac_addr, 3304 #ifdef CONFIG_NET_POLL_CONTROLLER 3305 .ndo_poll_controller = macb_poll_controller, 3306 #endif 3307 .ndo_set_features = macb_set_features, 3308 .ndo_features_check = macb_features_check, 3309 }; 3310 3311 /* Configure peripheral capabilities according to device tree 3312 * and integration options used 3313 */ 3314 static void macb_configure_caps(struct macb *bp, 3315 const struct macb_config *dt_conf) 3316 { 3317 u32 dcfg; 3318 3319 if (dt_conf) 3320 bp->caps = dt_conf->caps; 3321 3322 if (hw_is_gem(bp->regs, bp->native_io)) { 3323 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3324 3325 dcfg = gem_readl(bp, DCFG1); 3326 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3327 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3328 dcfg = gem_readl(bp, DCFG2); 3329 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3330 bp->caps |= MACB_CAPS_FIFO_MODE; 3331 #ifdef CONFIG_MACB_USE_HWSTAMP 3332 if (gem_has_ptp(bp)) { 3333 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3334 pr_err("GEM doesn't support hardware ptp.\n"); 3335 else { 3336 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3337 bp->ptp_info = &gem_ptp_info; 3338 } 3339 } 3340 #endif 3341 } 3342 3343 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3344 } 3345 3346 static void macb_probe_queues(void __iomem *mem, 3347 bool native_io, 3348 unsigned int *queue_mask, 3349 unsigned int *num_queues) 3350 { 3351 unsigned int hw_q; 3352 3353 *queue_mask = 0x1; 3354 *num_queues = 1; 3355 3356 /* is it macb or gem ? 3357 * 3358 * We need to read directly from the hardware here because 3359 * we are early in the probe process and don't have the 3360 * MACB_CAPS_MACB_IS_GEM flag positioned 3361 */ 3362 if (!hw_is_gem(mem, native_io)) 3363 return; 3364 3365 /* bit 0 is never set but queue 0 always exists */ 3366 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; 3367 3368 *queue_mask |= 0x1; 3369 3370 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 3371 if (*queue_mask & (1 << hw_q)) 3372 (*num_queues)++; 3373 } 3374 3375 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3376 struct clk **hclk, struct clk **tx_clk, 3377 struct clk **rx_clk, struct clk **tsu_clk) 3378 { 3379 struct macb_platform_data *pdata; 3380 int err; 3381 3382 pdata = dev_get_platdata(&pdev->dev); 3383 if (pdata) { 3384 *pclk = pdata->pclk; 3385 *hclk = pdata->hclk; 3386 } else { 3387 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3388 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3389 } 3390 3391 if (IS_ERR_OR_NULL(*pclk)) { 3392 err = PTR_ERR(*pclk); 3393 if (!err) 3394 err = -ENODEV; 3395 3396 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3397 return err; 3398 } 3399 3400 if (IS_ERR_OR_NULL(*hclk)) { 3401 err = PTR_ERR(*hclk); 3402 if (!err) 3403 err = -ENODEV; 3404 3405 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3406 return err; 3407 } 3408 3409 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 3410 if (IS_ERR(*tx_clk)) 3411 *tx_clk = NULL; 3412 3413 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); 3414 if (IS_ERR(*rx_clk)) 3415 *rx_clk = NULL; 3416 3417 *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk"); 3418 if (IS_ERR(*tsu_clk)) 3419 *tsu_clk = NULL; 3420 3421 err = clk_prepare_enable(*pclk); 3422 if (err) { 3423 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3424 return err; 3425 } 3426 3427 err = clk_prepare_enable(*hclk); 3428 if (err) { 3429 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3430 goto err_disable_pclk; 3431 } 3432 3433 err = clk_prepare_enable(*tx_clk); 3434 if (err) { 3435 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3436 goto err_disable_hclk; 3437 } 3438 3439 err = clk_prepare_enable(*rx_clk); 3440 if (err) { 3441 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3442 goto err_disable_txclk; 3443 } 3444 3445 err = clk_prepare_enable(*tsu_clk); 3446 if (err) { 3447 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3448 goto err_disable_rxclk; 3449 } 3450 3451 return 0; 3452 3453 err_disable_rxclk: 3454 clk_disable_unprepare(*rx_clk); 3455 3456 err_disable_txclk: 3457 clk_disable_unprepare(*tx_clk); 3458 3459 err_disable_hclk: 3460 clk_disable_unprepare(*hclk); 3461 3462 err_disable_pclk: 3463 clk_disable_unprepare(*pclk); 3464 3465 return err; 3466 } 3467 3468 static int macb_init(struct platform_device *pdev) 3469 { 3470 struct net_device *dev = platform_get_drvdata(pdev); 3471 unsigned int hw_q, q; 3472 struct macb *bp = netdev_priv(dev); 3473 struct macb_queue *queue; 3474 int err; 3475 u32 val, reg; 3476 3477 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3478 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3479 3480 /* set the queue register mapping once for all: queue0 has a special 3481 * register mapping but we don't want to test the queue index then 3482 * compute the corresponding register offset at run time. 3483 */ 3484 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3485 if (!(bp->queue_mask & (1 << hw_q))) 3486 continue; 3487 3488 queue = &bp->queues[q]; 3489 queue->bp = bp; 3490 netif_napi_add(dev, &queue->napi, macb_poll, 64); 3491 if (hw_q) { 3492 queue->ISR = GEM_ISR(hw_q - 1); 3493 queue->IER = GEM_IER(hw_q - 1); 3494 queue->IDR = GEM_IDR(hw_q - 1); 3495 queue->IMR = GEM_IMR(hw_q - 1); 3496 queue->TBQP = GEM_TBQP(hw_q - 1); 3497 queue->RBQP = GEM_RBQP(hw_q - 1); 3498 queue->RBQS = GEM_RBQS(hw_q - 1); 3499 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3500 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3501 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3502 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3503 } 3504 #endif 3505 } else { 3506 /* queue0 uses legacy registers */ 3507 queue->ISR = MACB_ISR; 3508 queue->IER = MACB_IER; 3509 queue->IDR = MACB_IDR; 3510 queue->IMR = MACB_IMR; 3511 queue->TBQP = MACB_TBQP; 3512 queue->RBQP = MACB_RBQP; 3513 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3514 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3515 queue->TBQPH = MACB_TBQPH; 3516 queue->RBQPH = MACB_RBQPH; 3517 } 3518 #endif 3519 } 3520 3521 /* get irq: here we use the linux queue index, not the hardware 3522 * queue index. the queue irq definitions in the device tree 3523 * must remove the optional gaps that could exist in the 3524 * hardware queue mask. 3525 */ 3526 queue->irq = platform_get_irq(pdev, q); 3527 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3528 IRQF_SHARED, dev->name, queue); 3529 if (err) { 3530 dev_err(&pdev->dev, 3531 "Unable to request IRQ %d (error %d)\n", 3532 queue->irq, err); 3533 return err; 3534 } 3535 3536 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3537 q++; 3538 } 3539 3540 dev->netdev_ops = &macb_netdev_ops; 3541 3542 /* setup appropriated routines according to adapter type */ 3543 if (macb_is_gem(bp)) { 3544 bp->max_tx_length = GEM_MAX_TX_LEN; 3545 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3546 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3547 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3548 bp->macbgem_ops.mog_rx = gem_rx; 3549 dev->ethtool_ops = &gem_ethtool_ops; 3550 } else { 3551 bp->max_tx_length = MACB_MAX_TX_LEN; 3552 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3553 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3554 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3555 bp->macbgem_ops.mog_rx = macb_rx; 3556 dev->ethtool_ops = &macb_ethtool_ops; 3557 } 3558 3559 /* Set features */ 3560 dev->hw_features = NETIF_F_SG; 3561 3562 /* Check LSO capability */ 3563 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3564 dev->hw_features |= MACB_NETIF_LSO; 3565 3566 /* Checksum offload is only available on gem with packet buffer */ 3567 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3568 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3569 if (bp->caps & MACB_CAPS_SG_DISABLED) 3570 dev->hw_features &= ~NETIF_F_SG; 3571 dev->features = dev->hw_features; 3572 3573 /* Check RX Flow Filters support. 3574 * Max Rx flows set by availability of screeners & compare regs: 3575 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3576 */ 3577 reg = gem_readl(bp, DCFG8); 3578 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3579 GEM_BFEXT(T2SCR, reg)); 3580 if (bp->max_tuples > 0) { 3581 /* also needs one ethtype match to check IPv4 */ 3582 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3583 /* program this reg now */ 3584 reg = 0; 3585 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3586 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3587 /* Filtering is supported in hw but don't enable it in kernel now */ 3588 dev->hw_features |= NETIF_F_NTUPLE; 3589 /* init Rx flow definitions */ 3590 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3591 bp->rx_fs_list.count = 0; 3592 spin_lock_init(&bp->rx_fs_lock); 3593 } else 3594 bp->max_tuples = 0; 3595 } 3596 3597 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3598 val = 0; 3599 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3600 val = GEM_BIT(RGMII); 3601 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3602 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3603 val = MACB_BIT(RMII); 3604 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3605 val = MACB_BIT(MII); 3606 3607 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3608 val |= MACB_BIT(CLKEN); 3609 3610 macb_or_gem_writel(bp, USRIO, val); 3611 } 3612 3613 /* Set MII management clock divider */ 3614 val = macb_mdc_clk_div(bp); 3615 val |= macb_dbw(bp); 3616 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3617 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3618 macb_writel(bp, NCFGR, val); 3619 3620 return 0; 3621 } 3622 3623 #if defined(CONFIG_OF) 3624 /* 1518 rounded up */ 3625 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3626 /* max number of receive buffers */ 3627 #define AT91ETHER_MAX_RX_DESCR 9 3628 3629 /* Initialize and start the Receiver and Transmit subsystems */ 3630 static int at91ether_start(struct net_device *dev) 3631 { 3632 struct macb *lp = netdev_priv(dev); 3633 struct macb_queue *q = &lp->queues[0]; 3634 struct macb_dma_desc *desc; 3635 dma_addr_t addr; 3636 u32 ctl; 3637 int i; 3638 3639 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3640 (AT91ETHER_MAX_RX_DESCR * 3641 macb_dma_desc_get_size(lp)), 3642 &q->rx_ring_dma, GFP_KERNEL); 3643 if (!q->rx_ring) 3644 return -ENOMEM; 3645 3646 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3647 AT91ETHER_MAX_RX_DESCR * 3648 AT91ETHER_MAX_RBUFF_SZ, 3649 &q->rx_buffers_dma, GFP_KERNEL); 3650 if (!q->rx_buffers) { 3651 dma_free_coherent(&lp->pdev->dev, 3652 AT91ETHER_MAX_RX_DESCR * 3653 macb_dma_desc_get_size(lp), 3654 q->rx_ring, q->rx_ring_dma); 3655 q->rx_ring = NULL; 3656 return -ENOMEM; 3657 } 3658 3659 addr = q->rx_buffers_dma; 3660 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3661 desc = macb_rx_desc(q, i); 3662 macb_set_addr(lp, desc, addr); 3663 desc->ctrl = 0; 3664 addr += AT91ETHER_MAX_RBUFF_SZ; 3665 } 3666 3667 /* Set the Wrap bit on the last descriptor */ 3668 desc->addr |= MACB_BIT(RX_WRAP); 3669 3670 /* Reset buffer index */ 3671 q->rx_tail = 0; 3672 3673 /* Program address of descriptor list in Rx Buffer Queue register */ 3674 macb_writel(lp, RBQP, q->rx_ring_dma); 3675 3676 /* Enable Receive and Transmit */ 3677 ctl = macb_readl(lp, NCR); 3678 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3679 3680 return 0; 3681 } 3682 3683 /* Open the ethernet interface */ 3684 static int at91ether_open(struct net_device *dev) 3685 { 3686 struct macb *lp = netdev_priv(dev); 3687 u32 ctl; 3688 int ret; 3689 3690 /* Clear internal statistics */ 3691 ctl = macb_readl(lp, NCR); 3692 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3693 3694 macb_set_hwaddr(lp); 3695 3696 ret = at91ether_start(dev); 3697 if (ret) 3698 return ret; 3699 3700 /* Enable MAC interrupts */ 3701 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3702 MACB_BIT(RXUBR) | 3703 MACB_BIT(ISR_TUND) | 3704 MACB_BIT(ISR_RLE) | 3705 MACB_BIT(TCOMP) | 3706 MACB_BIT(ISR_ROVR) | 3707 MACB_BIT(HRESP)); 3708 3709 /* schedule a link state check */ 3710 phy_start(dev->phydev); 3711 3712 netif_start_queue(dev); 3713 3714 return 0; 3715 } 3716 3717 /* Close the interface */ 3718 static int at91ether_close(struct net_device *dev) 3719 { 3720 struct macb *lp = netdev_priv(dev); 3721 struct macb_queue *q = &lp->queues[0]; 3722 u32 ctl; 3723 3724 /* Disable Receiver and Transmitter */ 3725 ctl = macb_readl(lp, NCR); 3726 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3727 3728 /* Disable MAC interrupts */ 3729 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3730 MACB_BIT(RXUBR) | 3731 MACB_BIT(ISR_TUND) | 3732 MACB_BIT(ISR_RLE) | 3733 MACB_BIT(TCOMP) | 3734 MACB_BIT(ISR_ROVR) | 3735 MACB_BIT(HRESP)); 3736 3737 netif_stop_queue(dev); 3738 3739 dma_free_coherent(&lp->pdev->dev, 3740 AT91ETHER_MAX_RX_DESCR * 3741 macb_dma_desc_get_size(lp), 3742 q->rx_ring, q->rx_ring_dma); 3743 q->rx_ring = NULL; 3744 3745 dma_free_coherent(&lp->pdev->dev, 3746 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, 3747 q->rx_buffers, q->rx_buffers_dma); 3748 q->rx_buffers = NULL; 3749 3750 return 0; 3751 } 3752 3753 /* Transmit packet */ 3754 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, 3755 struct net_device *dev) 3756 { 3757 struct macb *lp = netdev_priv(dev); 3758 3759 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3760 netif_stop_queue(dev); 3761 3762 /* Store packet information (to free when Tx completed) */ 3763 lp->skb = skb; 3764 lp->skb_length = skb->len; 3765 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, 3766 skb->len, DMA_TO_DEVICE); 3767 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { 3768 dev_kfree_skb_any(skb); 3769 dev->stats.tx_dropped++; 3770 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3771 return NETDEV_TX_OK; 3772 } 3773 3774 /* Set address of the data in the Transmit Address register */ 3775 macb_writel(lp, TAR, lp->skb_physaddr); 3776 /* Set length of the packet in the Transmit Control register */ 3777 macb_writel(lp, TCR, skb->len); 3778 3779 } else { 3780 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3781 return NETDEV_TX_BUSY; 3782 } 3783 3784 return NETDEV_TX_OK; 3785 } 3786 3787 /* Extract received frame from buffer descriptors and sent to upper layers. 3788 * (Called from interrupt context) 3789 */ 3790 static void at91ether_rx(struct net_device *dev) 3791 { 3792 struct macb *lp = netdev_priv(dev); 3793 struct macb_queue *q = &lp->queues[0]; 3794 struct macb_dma_desc *desc; 3795 unsigned char *p_recv; 3796 struct sk_buff *skb; 3797 unsigned int pktlen; 3798 3799 desc = macb_rx_desc(q, q->rx_tail); 3800 while (desc->addr & MACB_BIT(RX_USED)) { 3801 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 3802 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 3803 skb = netdev_alloc_skb(dev, pktlen + 2); 3804 if (skb) { 3805 skb_reserve(skb, 2); 3806 skb_put_data(skb, p_recv, pktlen); 3807 3808 skb->protocol = eth_type_trans(skb, dev); 3809 dev->stats.rx_packets++; 3810 dev->stats.rx_bytes += pktlen; 3811 netif_rx(skb); 3812 } else { 3813 dev->stats.rx_dropped++; 3814 } 3815 3816 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 3817 dev->stats.multicast++; 3818 3819 /* reset ownership bit */ 3820 desc->addr &= ~MACB_BIT(RX_USED); 3821 3822 /* wrap after last buffer */ 3823 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 3824 q->rx_tail = 0; 3825 else 3826 q->rx_tail++; 3827 3828 desc = macb_rx_desc(q, q->rx_tail); 3829 } 3830 } 3831 3832 /* MAC interrupt handler */ 3833 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 3834 { 3835 struct net_device *dev = dev_id; 3836 struct macb *lp = netdev_priv(dev); 3837 u32 intstatus, ctl; 3838 3839 /* MAC Interrupt Status register indicates what interrupts are pending. 3840 * It is automatically cleared once read. 3841 */ 3842 intstatus = macb_readl(lp, ISR); 3843 3844 /* Receive complete */ 3845 if (intstatus & MACB_BIT(RCOMP)) 3846 at91ether_rx(dev); 3847 3848 /* Transmit complete */ 3849 if (intstatus & MACB_BIT(TCOMP)) { 3850 /* The TCOM bit is set even if the transmission failed */ 3851 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 3852 dev->stats.tx_errors++; 3853 3854 if (lp->skb) { 3855 dev_consume_skb_irq(lp->skb); 3856 lp->skb = NULL; 3857 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr, 3858 lp->skb_length, DMA_TO_DEVICE); 3859 dev->stats.tx_packets++; 3860 dev->stats.tx_bytes += lp->skb_length; 3861 } 3862 netif_wake_queue(dev); 3863 } 3864 3865 /* Work-around for EMAC Errata section 41.3.1 */ 3866 if (intstatus & MACB_BIT(RXUBR)) { 3867 ctl = macb_readl(lp, NCR); 3868 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 3869 wmb(); 3870 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 3871 } 3872 3873 if (intstatus & MACB_BIT(ISR_ROVR)) 3874 netdev_err(dev, "ROVR error\n"); 3875 3876 return IRQ_HANDLED; 3877 } 3878 3879 #ifdef CONFIG_NET_POLL_CONTROLLER 3880 static void at91ether_poll_controller(struct net_device *dev) 3881 { 3882 unsigned long flags; 3883 3884 local_irq_save(flags); 3885 at91ether_interrupt(dev->irq, dev); 3886 local_irq_restore(flags); 3887 } 3888 #endif 3889 3890 static const struct net_device_ops at91ether_netdev_ops = { 3891 .ndo_open = at91ether_open, 3892 .ndo_stop = at91ether_close, 3893 .ndo_start_xmit = at91ether_start_xmit, 3894 .ndo_get_stats = macb_get_stats, 3895 .ndo_set_rx_mode = macb_set_rx_mode, 3896 .ndo_set_mac_address = eth_mac_addr, 3897 .ndo_do_ioctl = macb_ioctl, 3898 .ndo_validate_addr = eth_validate_addr, 3899 #ifdef CONFIG_NET_POLL_CONTROLLER 3900 .ndo_poll_controller = at91ether_poll_controller, 3901 #endif 3902 }; 3903 3904 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 3905 struct clk **hclk, struct clk **tx_clk, 3906 struct clk **rx_clk, struct clk **tsu_clk) 3907 { 3908 int err; 3909 3910 *hclk = NULL; 3911 *tx_clk = NULL; 3912 *rx_clk = NULL; 3913 *tsu_clk = NULL; 3914 3915 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 3916 if (IS_ERR(*pclk)) 3917 return PTR_ERR(*pclk); 3918 3919 err = clk_prepare_enable(*pclk); 3920 if (err) { 3921 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3922 return err; 3923 } 3924 3925 return 0; 3926 } 3927 3928 static int at91ether_init(struct platform_device *pdev) 3929 { 3930 struct net_device *dev = platform_get_drvdata(pdev); 3931 struct macb *bp = netdev_priv(dev); 3932 int err; 3933 u32 reg; 3934 3935 bp->queues[0].bp = bp; 3936 3937 dev->netdev_ops = &at91ether_netdev_ops; 3938 dev->ethtool_ops = &macb_ethtool_ops; 3939 3940 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 3941 0, dev->name, dev); 3942 if (err) 3943 return err; 3944 3945 macb_writel(bp, NCR, 0); 3946 3947 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); 3948 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 3949 reg |= MACB_BIT(RM9200_RMII); 3950 3951 macb_writel(bp, NCFGR, reg); 3952 3953 return 0; 3954 } 3955 3956 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, 3957 unsigned long parent_rate) 3958 { 3959 return mgmt->rate; 3960 } 3961 3962 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, 3963 unsigned long *parent_rate) 3964 { 3965 if (WARN_ON(rate < 2500000)) 3966 return 2500000; 3967 else if (rate == 2500000) 3968 return 2500000; 3969 else if (WARN_ON(rate < 13750000)) 3970 return 2500000; 3971 else if (WARN_ON(rate < 25000000)) 3972 return 25000000; 3973 else if (rate == 25000000) 3974 return 25000000; 3975 else if (WARN_ON(rate < 75000000)) 3976 return 25000000; 3977 else if (WARN_ON(rate < 125000000)) 3978 return 125000000; 3979 else if (rate == 125000000) 3980 return 125000000; 3981 3982 WARN_ON(rate > 125000000); 3983 3984 return 125000000; 3985 } 3986 3987 static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate, 3988 unsigned long parent_rate) 3989 { 3990 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate); 3991 if (rate != 125000000) 3992 iowrite32(1, mgmt->reg); 3993 else 3994 iowrite32(0, mgmt->reg); 3995 mgmt->rate = rate; 3996 3997 return 0; 3998 } 3999 4000 static const struct clk_ops fu540_c000_ops = { 4001 .recalc_rate = fu540_macb_tx_recalc_rate, 4002 .round_rate = fu540_macb_tx_round_rate, 4003 .set_rate = fu540_macb_tx_set_rate, 4004 }; 4005 4006 static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, 4007 struct clk **hclk, struct clk **tx_clk, 4008 struct clk **rx_clk, struct clk **tsu_clk) 4009 { 4010 struct clk_init_data init; 4011 int err = 0; 4012 4013 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk); 4014 if (err) 4015 return err; 4016 4017 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); 4018 if (!mgmt) 4019 return -ENOMEM; 4020 4021 init.name = "sifive-gemgxl-mgmt"; 4022 init.ops = &fu540_c000_ops; 4023 init.flags = 0; 4024 init.num_parents = 0; 4025 4026 mgmt->rate = 0; 4027 mgmt->hw.init = &init; 4028 4029 *tx_clk = clk_register(NULL, &mgmt->hw); 4030 if (IS_ERR(*tx_clk)) 4031 return PTR_ERR(*tx_clk); 4032 4033 err = clk_prepare_enable(*tx_clk); 4034 if (err) 4035 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 4036 else 4037 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); 4038 4039 return 0; 4040 } 4041 4042 static int fu540_c000_init(struct platform_device *pdev) 4043 { 4044 struct resource *res; 4045 4046 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 4047 if (!res) 4048 return -ENODEV; 4049 4050 mgmt->reg = ioremap(res->start, resource_size(res)); 4051 if (!mgmt->reg) 4052 return -ENOMEM; 4053 4054 return macb_init(pdev); 4055 } 4056 4057 static const struct macb_config fu540_c000_config = { 4058 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | 4059 MACB_CAPS_GEM_HAS_PTP, 4060 .dma_burst_length = 16, 4061 .clk_init = fu540_c000_clk_init, 4062 .init = fu540_c000_init, 4063 .jumbo_max_len = 10240, 4064 }; 4065 4066 static const struct macb_config at91sam9260_config = { 4067 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 4068 .clk_init = macb_clk_init, 4069 .init = macb_init, 4070 }; 4071 4072 static const struct macb_config sama5d3macb_config = { 4073 .caps = MACB_CAPS_SG_DISABLED 4074 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 4075 .clk_init = macb_clk_init, 4076 .init = macb_init, 4077 }; 4078 4079 static const struct macb_config pc302gem_config = { 4080 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 4081 .dma_burst_length = 16, 4082 .clk_init = macb_clk_init, 4083 .init = macb_init, 4084 }; 4085 4086 static const struct macb_config sama5d2_config = { 4087 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 4088 .dma_burst_length = 16, 4089 .clk_init = macb_clk_init, 4090 .init = macb_init, 4091 }; 4092 4093 static const struct macb_config sama5d3_config = { 4094 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE 4095 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, 4096 .dma_burst_length = 16, 4097 .clk_init = macb_clk_init, 4098 .init = macb_init, 4099 .jumbo_max_len = 10240, 4100 }; 4101 4102 static const struct macb_config sama5d4_config = { 4103 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, 4104 .dma_burst_length = 4, 4105 .clk_init = macb_clk_init, 4106 .init = macb_init, 4107 }; 4108 4109 static const struct macb_config emac_config = { 4110 .caps = MACB_CAPS_NEEDS_RSTONUBR, 4111 .clk_init = at91ether_clk_init, 4112 .init = at91ether_init, 4113 }; 4114 4115 static const struct macb_config np4_config = { 4116 .caps = MACB_CAPS_USRIO_DISABLED, 4117 .clk_init = macb_clk_init, 4118 .init = macb_init, 4119 }; 4120 4121 static const struct macb_config zynqmp_config = { 4122 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 4123 MACB_CAPS_JUMBO | 4124 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, 4125 .dma_burst_length = 16, 4126 .clk_init = macb_clk_init, 4127 .init = macb_init, 4128 .jumbo_max_len = 10240, 4129 }; 4130 4131 static const struct macb_config zynq_config = { 4132 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | 4133 MACB_CAPS_NEEDS_RSTONUBR, 4134 .dma_burst_length = 16, 4135 .clk_init = macb_clk_init, 4136 .init = macb_init, 4137 }; 4138 4139 static const struct of_device_id macb_dt_ids[] = { 4140 { .compatible = "cdns,at32ap7000-macb" }, 4141 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 4142 { .compatible = "cdns,macb" }, 4143 { .compatible = "cdns,np4-macb", .data = &np4_config }, 4144 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 4145 { .compatible = "cdns,gem", .data = &pc302gem_config }, 4146 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, 4147 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 4148 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 4149 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, 4150 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 4151 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 4152 { .compatible = "cdns,emac", .data = &emac_config }, 4153 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 4154 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 4155 { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, 4156 { /* sentinel */ } 4157 }; 4158 MODULE_DEVICE_TABLE(of, macb_dt_ids); 4159 #endif /* CONFIG_OF */ 4160 4161 static const struct macb_config default_gem_config = { 4162 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 4163 MACB_CAPS_JUMBO | 4164 MACB_CAPS_GEM_HAS_PTP, 4165 .dma_burst_length = 16, 4166 .clk_init = macb_clk_init, 4167 .init = macb_init, 4168 .jumbo_max_len = 10240, 4169 }; 4170 4171 static int macb_probe(struct platform_device *pdev) 4172 { 4173 const struct macb_config *macb_config = &default_gem_config; 4174 int (*clk_init)(struct platform_device *, struct clk **, 4175 struct clk **, struct clk **, struct clk **, 4176 struct clk **) = macb_config->clk_init; 4177 int (*init)(struct platform_device *) = macb_config->init; 4178 struct device_node *np = pdev->dev.of_node; 4179 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 4180 struct clk *tsu_clk = NULL; 4181 unsigned int queue_mask, num_queues; 4182 bool native_io; 4183 struct phy_device *phydev; 4184 struct net_device *dev; 4185 struct resource *regs; 4186 void __iomem *mem; 4187 const char *mac; 4188 struct macb *bp; 4189 int err, val; 4190 4191 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4192 mem = devm_ioremap_resource(&pdev->dev, regs); 4193 if (IS_ERR(mem)) 4194 return PTR_ERR(mem); 4195 4196 if (np) { 4197 const struct of_device_id *match; 4198 4199 match = of_match_node(macb_dt_ids, np); 4200 if (match && match->data) { 4201 macb_config = match->data; 4202 clk_init = macb_config->clk_init; 4203 init = macb_config->init; 4204 } 4205 } 4206 4207 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); 4208 if (err) 4209 return err; 4210 4211 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); 4212 pm_runtime_use_autosuspend(&pdev->dev); 4213 pm_runtime_get_noresume(&pdev->dev); 4214 pm_runtime_set_active(&pdev->dev); 4215 pm_runtime_enable(&pdev->dev); 4216 native_io = hw_is_native_io(mem); 4217 4218 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 4219 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 4220 if (!dev) { 4221 err = -ENOMEM; 4222 goto err_disable_clocks; 4223 } 4224 4225 dev->base_addr = regs->start; 4226 4227 SET_NETDEV_DEV(dev, &pdev->dev); 4228 4229 bp = netdev_priv(dev); 4230 bp->pdev = pdev; 4231 bp->dev = dev; 4232 bp->regs = mem; 4233 bp->native_io = native_io; 4234 if (native_io) { 4235 bp->macb_reg_readl = hw_readl_native; 4236 bp->macb_reg_writel = hw_writel_native; 4237 } else { 4238 bp->macb_reg_readl = hw_readl; 4239 bp->macb_reg_writel = hw_writel; 4240 } 4241 bp->num_queues = num_queues; 4242 bp->queue_mask = queue_mask; 4243 if (macb_config) 4244 bp->dma_burst_length = macb_config->dma_burst_length; 4245 bp->pclk = pclk; 4246 bp->hclk = hclk; 4247 bp->tx_clk = tx_clk; 4248 bp->rx_clk = rx_clk; 4249 bp->tsu_clk = tsu_clk; 4250 if (macb_config) 4251 bp->jumbo_max_len = macb_config->jumbo_max_len; 4252 4253 bp->wol = 0; 4254 if (of_get_property(np, "magic-packet", NULL)) 4255 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 4256 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 4257 4258 spin_lock_init(&bp->lock); 4259 4260 /* setup capabilities */ 4261 macb_configure_caps(bp, macb_config); 4262 4263 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4264 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 4265 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 4266 bp->hw_dma_cap |= HW_DMA_CAP_64B; 4267 } 4268 #endif 4269 platform_set_drvdata(pdev, dev); 4270 4271 dev->irq = platform_get_irq(pdev, 0); 4272 if (dev->irq < 0) { 4273 err = dev->irq; 4274 goto err_out_free_netdev; 4275 } 4276 4277 /* MTU range: 68 - 1500 or 10240 */ 4278 dev->min_mtu = GEM_MTU_MIN_SIZE; 4279 if (bp->caps & MACB_CAPS_JUMBO) 4280 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; 4281 else 4282 dev->max_mtu = ETH_DATA_LEN; 4283 4284 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { 4285 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); 4286 if (val) 4287 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * 4288 macb_dma_desc_get_size(bp); 4289 4290 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); 4291 if (val) 4292 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * 4293 macb_dma_desc_get_size(bp); 4294 } 4295 4296 bp->rx_intr_mask = MACB_RX_INT_FLAGS; 4297 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) 4298 bp->rx_intr_mask |= MACB_BIT(RXUBR); 4299 4300 mac = of_get_mac_address(np); 4301 if (PTR_ERR(mac) == -EPROBE_DEFER) { 4302 err = -EPROBE_DEFER; 4303 goto err_out_free_netdev; 4304 } else if (!IS_ERR(mac)) { 4305 ether_addr_copy(bp->dev->dev_addr, mac); 4306 } else { 4307 macb_get_hwaddr(bp); 4308 } 4309 4310 err = of_get_phy_mode(np); 4311 if (err < 0) 4312 /* not found in DT, MII by default */ 4313 bp->phy_interface = PHY_INTERFACE_MODE_MII; 4314 else 4315 bp->phy_interface = err; 4316 4317 /* IP specific init */ 4318 err = init(pdev); 4319 if (err) 4320 goto err_out_free_netdev; 4321 4322 err = macb_mii_init(bp); 4323 if (err) 4324 goto err_out_free_netdev; 4325 4326 phydev = dev->phydev; 4327 4328 netif_carrier_off(dev); 4329 4330 err = register_netdev(dev); 4331 if (err) { 4332 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 4333 goto err_out_unregister_mdio; 4334 } 4335 4336 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, 4337 (unsigned long)bp); 4338 4339 phy_attached_info(phydev); 4340 4341 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 4342 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 4343 dev->base_addr, dev->irq, dev->dev_addr); 4344 4345 pm_runtime_mark_last_busy(&bp->pdev->dev); 4346 pm_runtime_put_autosuspend(&bp->pdev->dev); 4347 4348 return 0; 4349 4350 err_out_unregister_mdio: 4351 phy_disconnect(dev->phydev); 4352 mdiobus_unregister(bp->mii_bus); 4353 of_node_put(bp->phy_node); 4354 if (np && of_phy_is_fixed_link(np)) 4355 of_phy_deregister_fixed_link(np); 4356 mdiobus_free(bp->mii_bus); 4357 4358 err_out_free_netdev: 4359 free_netdev(dev); 4360 4361 err_disable_clocks: 4362 clk_disable_unprepare(tx_clk); 4363 clk_unregister(tx_clk); 4364 clk_disable_unprepare(hclk); 4365 clk_disable_unprepare(pclk); 4366 clk_disable_unprepare(rx_clk); 4367 clk_disable_unprepare(tsu_clk); 4368 pm_runtime_disable(&pdev->dev); 4369 pm_runtime_set_suspended(&pdev->dev); 4370 pm_runtime_dont_use_autosuspend(&pdev->dev); 4371 4372 return err; 4373 } 4374 4375 static int macb_remove(struct platform_device *pdev) 4376 { 4377 struct net_device *dev; 4378 struct macb *bp; 4379 struct device_node *np = pdev->dev.of_node; 4380 4381 dev = platform_get_drvdata(pdev); 4382 4383 if (dev) { 4384 bp = netdev_priv(dev); 4385 if (dev->phydev) 4386 phy_disconnect(dev->phydev); 4387 mdiobus_unregister(bp->mii_bus); 4388 if (np && of_phy_is_fixed_link(np)) 4389 of_phy_deregister_fixed_link(np); 4390 dev->phydev = NULL; 4391 mdiobus_free(bp->mii_bus); 4392 4393 unregister_netdev(dev); 4394 pm_runtime_disable(&pdev->dev); 4395 pm_runtime_dont_use_autosuspend(&pdev->dev); 4396 if (!pm_runtime_suspended(&pdev->dev)) { 4397 clk_disable_unprepare(bp->tx_clk); 4398 clk_unregister(bp->tx_clk); 4399 clk_disable_unprepare(bp->hclk); 4400 clk_disable_unprepare(bp->pclk); 4401 clk_disable_unprepare(bp->rx_clk); 4402 clk_disable_unprepare(bp->tsu_clk); 4403 pm_runtime_set_suspended(&pdev->dev); 4404 } 4405 of_node_put(bp->phy_node); 4406 free_netdev(dev); 4407 } 4408 4409 return 0; 4410 } 4411 4412 static int __maybe_unused macb_suspend(struct device *dev) 4413 { 4414 struct net_device *netdev = dev_get_drvdata(dev); 4415 struct macb *bp = netdev_priv(netdev); 4416 struct macb_queue *queue = bp->queues; 4417 unsigned long flags; 4418 unsigned int q; 4419 4420 if (!netif_running(netdev)) 4421 return 0; 4422 4423 4424 if (bp->wol & MACB_WOL_ENABLED) { 4425 macb_writel(bp, IER, MACB_BIT(WOL)); 4426 macb_writel(bp, WOL, MACB_BIT(MAG)); 4427 enable_irq_wake(bp->queues[0].irq); 4428 netif_device_detach(netdev); 4429 } else { 4430 netif_device_detach(netdev); 4431 for (q = 0, queue = bp->queues; q < bp->num_queues; 4432 ++q, ++queue) 4433 napi_disable(&queue->napi); 4434 phy_stop(netdev->phydev); 4435 phy_suspend(netdev->phydev); 4436 spin_lock_irqsave(&bp->lock, flags); 4437 macb_reset_hw(bp); 4438 spin_unlock_irqrestore(&bp->lock, flags); 4439 4440 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 4441 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); 4442 4443 if (netdev->hw_features & NETIF_F_NTUPLE) 4444 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); 4445 } 4446 4447 netif_carrier_off(netdev); 4448 if (bp->ptp_info) 4449 bp->ptp_info->ptp_remove(netdev); 4450 pm_runtime_force_suspend(dev); 4451 4452 return 0; 4453 } 4454 4455 static int __maybe_unused macb_resume(struct device *dev) 4456 { 4457 struct net_device *netdev = dev_get_drvdata(dev); 4458 struct macb *bp = netdev_priv(netdev); 4459 struct macb_queue *queue = bp->queues; 4460 unsigned int q; 4461 4462 if (!netif_running(netdev)) 4463 return 0; 4464 4465 pm_runtime_force_resume(dev); 4466 4467 if (bp->wol & MACB_WOL_ENABLED) { 4468 macb_writel(bp, IDR, MACB_BIT(WOL)); 4469 macb_writel(bp, WOL, 0); 4470 disable_irq_wake(bp->queues[0].irq); 4471 } else { 4472 macb_writel(bp, NCR, MACB_BIT(MPE)); 4473 4474 if (netdev->hw_features & NETIF_F_NTUPLE) 4475 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); 4476 4477 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 4478 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); 4479 4480 for (q = 0, queue = bp->queues; q < bp->num_queues; 4481 ++q, ++queue) 4482 napi_enable(&queue->napi); 4483 phy_resume(netdev->phydev); 4484 phy_init_hw(netdev->phydev); 4485 phy_start(netdev->phydev); 4486 } 4487 4488 bp->macbgem_ops.mog_init_rings(bp); 4489 macb_init_hw(bp); 4490 macb_set_rx_mode(netdev); 4491 macb_restore_features(bp); 4492 netif_device_attach(netdev); 4493 if (bp->ptp_info) 4494 bp->ptp_info->ptp_init(netdev); 4495 4496 return 0; 4497 } 4498 4499 static int __maybe_unused macb_runtime_suspend(struct device *dev) 4500 { 4501 struct net_device *netdev = dev_get_drvdata(dev); 4502 struct macb *bp = netdev_priv(netdev); 4503 4504 if (!(device_may_wakeup(&bp->dev->dev))) { 4505 clk_disable_unprepare(bp->tx_clk); 4506 clk_disable_unprepare(bp->hclk); 4507 clk_disable_unprepare(bp->pclk); 4508 clk_disable_unprepare(bp->rx_clk); 4509 } 4510 clk_disable_unprepare(bp->tsu_clk); 4511 4512 return 0; 4513 } 4514 4515 static int __maybe_unused macb_runtime_resume(struct device *dev) 4516 { 4517 struct net_device *netdev = dev_get_drvdata(dev); 4518 struct macb *bp = netdev_priv(netdev); 4519 4520 if (!(device_may_wakeup(&bp->dev->dev))) { 4521 clk_prepare_enable(bp->pclk); 4522 clk_prepare_enable(bp->hclk); 4523 clk_prepare_enable(bp->tx_clk); 4524 clk_prepare_enable(bp->rx_clk); 4525 } 4526 clk_prepare_enable(bp->tsu_clk); 4527 4528 return 0; 4529 } 4530 4531 static const struct dev_pm_ops macb_pm_ops = { 4532 SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume) 4533 SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL) 4534 }; 4535 4536 static struct platform_driver macb_driver = { 4537 .probe = macb_probe, 4538 .remove = macb_remove, 4539 .driver = { 4540 .name = "macb", 4541 .of_match_table = of_match_ptr(macb_dt_ids), 4542 .pm = &macb_pm_ops, 4543 }, 4544 }; 4545 4546 module_platform_driver(macb_driver); 4547 4548 MODULE_LICENSE("GPL"); 4549 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 4550 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 4551 MODULE_ALIAS("platform:macb"); 4552