// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
				 | MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

#define MACB_PM_TIMEOUT	100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* A DMA buffer descriptor may have a different size depending on the
 * hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3.
dma address width 32 bits with hardware timestamping: 111 * word 1: 32 bit address of Data Buffer 112 * word 2: control 113 * word 3: timestamp word 1 114 * word 4: timestamp word 2 115 * 116 * 4. dma address width 64 bits with hardware timestamping: 117 * word 1: 32 bit address of Data Buffer 118 * word 2: control 119 * word 3: upper 32 bit address of Data Buffer 120 * word 4: unused 121 * word 5: timestamp word 1 122 * word 6: timestamp word 2 123 */ 124 static unsigned int macb_dma_desc_get_size(struct macb *bp) 125 { 126 #ifdef MACB_EXT_DESC 127 unsigned int desc_size; 128 129 switch (bp->hw_dma_cap) { 130 case HW_DMA_CAP_64B: 131 desc_size = sizeof(struct macb_dma_desc) 132 + sizeof(struct macb_dma_desc_64); 133 break; 134 case HW_DMA_CAP_PTP: 135 desc_size = sizeof(struct macb_dma_desc) 136 + sizeof(struct macb_dma_desc_ptp); 137 break; 138 case HW_DMA_CAP_64B_PTP: 139 desc_size = sizeof(struct macb_dma_desc) 140 + sizeof(struct macb_dma_desc_64) 141 + sizeof(struct macb_dma_desc_ptp); 142 break; 143 default: 144 desc_size = sizeof(struct macb_dma_desc); 145 } 146 return desc_size; 147 #endif 148 return sizeof(struct macb_dma_desc); 149 } 150 151 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 152 { 153 #ifdef MACB_EXT_DESC 154 switch (bp->hw_dma_cap) { 155 case HW_DMA_CAP_64B: 156 case HW_DMA_CAP_PTP: 157 desc_idx <<= 1; 158 break; 159 case HW_DMA_CAP_64B_PTP: 160 desc_idx *= 3; 161 break; 162 default: 163 break; 164 } 165 #endif 166 return desc_idx; 167 } 168 169 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 170 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 171 { 172 return (struct macb_dma_desc_64 *)((void *)desc 173 + sizeof(struct macb_dma_desc)); 174 } 175 #endif 176 177 /* Ring buffer accessors */ 178 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 179 { 180 return index & (bp->tx_ring_size - 1); 181 } 182 183 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 184 unsigned int index) 185 { 186 index = macb_tx_ring_wrap(queue->bp, index); 187 index = macb_adj_dma_desc_idx(queue->bp, index); 188 return &queue->tx_ring[index]; 189 } 190 191 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 192 unsigned int index) 193 { 194 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; 195 } 196 197 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 198 { 199 dma_addr_t offset; 200 201 offset = macb_tx_ring_wrap(queue->bp, index) * 202 macb_dma_desc_get_size(queue->bp); 203 204 return queue->tx_ring_dma + offset; 205 } 206 207 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) 208 { 209 return index & (bp->rx_ring_size - 1); 210 } 211 212 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) 213 { 214 index = macb_rx_ring_wrap(queue->bp, index); 215 index = macb_adj_dma_desc_idx(queue->bp, index); 216 return &queue->rx_ring[index]; 217 } 218 219 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) 220 { 221 return queue->rx_buffers + queue->bp->rx_buffer_size * 222 macb_rx_ring_wrap(queue->bp, index); 223 } 224 225 /* I/O accessors */ 226 static u32 hw_readl_native(struct macb *bp, int offset) 227 { 228 return __raw_readl(bp->regs + offset); 229 } 230 231 static void hw_writel_native(struct macb *bp, int offset, u32 value) 232 { 233 __raw_writel(value, bp->regs + offset); 234 } 235 236 static u32 hw_readl(struct macb *bp, int offset) 237 { 238 return 
readl_relaxed(bp->regs + offset); 239 } 240 241 static void hw_writel(struct macb *bp, int offset, u32 value) 242 { 243 writel_relaxed(value, bp->regs + offset); 244 } 245 246 /* Find the CPU endianness by using the loopback bit of NCR register. When the 247 * CPU is in big endian we need to program swapped mode for management 248 * descriptor access. 249 */ 250 static bool hw_is_native_io(void __iomem *addr) 251 { 252 u32 value = MACB_BIT(LLB); 253 254 __raw_writel(value, addr + MACB_NCR); 255 value = __raw_readl(addr + MACB_NCR); 256 257 /* Write 0 back to disable everything */ 258 __raw_writel(0, addr + MACB_NCR); 259 260 return value == MACB_BIT(LLB); 261 } 262 263 static bool hw_is_gem(void __iomem *addr, bool native_io) 264 { 265 u32 id; 266 267 if (native_io) 268 id = __raw_readl(addr + MACB_MID); 269 else 270 id = readl_relaxed(addr + MACB_MID); 271 272 return MACB_BFEXT(IDNUM, id) >= 0x2; 273 } 274 275 static void macb_set_hwaddr(struct macb *bp) 276 { 277 u32 bottom; 278 u16 top; 279 280 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 281 macb_or_gem_writel(bp, SA1B, bottom); 282 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 283 macb_or_gem_writel(bp, SA1T, top); 284 285 /* Clear unused address register sets */ 286 macb_or_gem_writel(bp, SA2B, 0); 287 macb_or_gem_writel(bp, SA2T, 0); 288 macb_or_gem_writel(bp, SA3B, 0); 289 macb_or_gem_writel(bp, SA3T, 0); 290 macb_or_gem_writel(bp, SA4B, 0); 291 macb_or_gem_writel(bp, SA4T, 0); 292 } 293 294 static void macb_get_hwaddr(struct macb *bp) 295 { 296 u32 bottom; 297 u16 top; 298 u8 addr[6]; 299 int i; 300 301 /* Check all 4 address register for valid address */ 302 for (i = 0; i < 4; i++) { 303 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 304 top = macb_or_gem_readl(bp, SA1T + i * 8); 305 306 addr[0] = bottom & 0xff; 307 addr[1] = (bottom >> 8) & 0xff; 308 addr[2] = (bottom >> 16) & 0xff; 309 addr[3] = (bottom >> 24) & 0xff; 310 addr[4] = top & 0xff; 311 addr[5] = (top >> 8) & 0xff; 312 313 if (is_valid_ether_addr(addr)) { 314 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 315 return; 316 } 317 } 318 319 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 320 eth_hw_addr_random(bp->dev); 321 } 322 323 static int macb_mdio_wait_for_idle(struct macb *bp) 324 { 325 u32 val; 326 327 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), 328 1, MACB_MDIO_TIMEOUT); 329 } 330 331 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 332 { 333 struct macb *bp = bus->priv; 334 int status; 335 336 status = pm_runtime_get_sync(&bp->pdev->dev); 337 if (status < 0) { 338 pm_runtime_put_noidle(&bp->pdev->dev); 339 goto mdio_pm_exit; 340 } 341 342 status = macb_mdio_wait_for_idle(bp); 343 if (status < 0) 344 goto mdio_read_exit; 345 346 if (regnum & MII_ADDR_C45) { 347 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 348 | MACB_BF(RW, MACB_MAN_C45_ADDR) 349 | MACB_BF(PHYA, mii_id) 350 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 351 | MACB_BF(DATA, regnum & 0xFFFF) 352 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 353 354 status = macb_mdio_wait_for_idle(bp); 355 if (status < 0) 356 goto mdio_read_exit; 357 358 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) 359 | MACB_BF(RW, MACB_MAN_C45_READ) 360 | MACB_BF(PHYA, mii_id) 361 | MACB_BF(REGA, (regnum >> 16) & 0x1F) 362 | MACB_BF(CODE, MACB_MAN_C45_CODE))); 363 } else { 364 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) 365 | MACB_BF(RW, MACB_MAN_C22_READ) 366 | MACB_BF(PHYA, mii_id) 367 | MACB_BF(REGA, regnum) 368 | MACB_BF(CODE, 
				      MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
				      | MACB_BF(RW, MACB_MAN_C45_ADDR)
				      | MACB_BF(PHYA, mii_id)
				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
				      | MACB_BF(DATA, regnum & 0xFFFF)
				      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
				      | MACB_BF(RW, MACB_MAN_C45_WRITE)
				      | MACB_BF(PHYA, mii_id)
				      | MACB_BF(REGA, (regnum >> 16) & 0x1F)
				      | MACB_BF(CODE, MACB_MAN_C45_CODE)
				      | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				      | MACB_BF(RW, MACB_MAN_C22_WRITE)
				      | MACB_BF(PHYA, mii_id)
				      | MACB_BF(REGA, regnum)
				      | MACB_BF(CODE, MACB_MAN_C22_CODE)
				      | MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set the TX clock to a frequency matching the link speed
 * @clk: Pointer to the clock to change
 * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
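	 *
	 * Note: ferr below is expressed in steps of rate / 100000, i.e. 10 ppm
	 * per unit, so a value greater than 5 means the error exceeds 50 ppm.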
492 */ 493 ferr = abs(rate_rounded - rate); 494 ferr = DIV_ROUND_UP(ferr, rate / 100000); 495 if (ferr > 5) 496 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 497 rate); 498 499 if (clk_set_rate(clk, rate_rounded)) 500 netdev_err(dev, "adjusting tx_clk failed.\n"); 501 } 502 503 static void macb_validate(struct phylink_config *config, 504 unsigned long *supported, 505 struct phylink_link_state *state) 506 { 507 struct net_device *ndev = to_net_dev(config->dev); 508 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 509 struct macb *bp = netdev_priv(ndev); 510 511 /* We only support MII, RMII, GMII, RGMII & SGMII. */ 512 if (state->interface != PHY_INTERFACE_MODE_NA && 513 state->interface != PHY_INTERFACE_MODE_MII && 514 state->interface != PHY_INTERFACE_MODE_RMII && 515 state->interface != PHY_INTERFACE_MODE_GMII && 516 state->interface != PHY_INTERFACE_MODE_SGMII && 517 !phy_interface_mode_is_rgmii(state->interface)) { 518 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 519 return; 520 } 521 522 if (!macb_is_gem(bp) && 523 (state->interface == PHY_INTERFACE_MODE_GMII || 524 phy_interface_mode_is_rgmii(state->interface))) { 525 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 526 return; 527 } 528 529 phylink_set_port_modes(mask); 530 phylink_set(mask, Autoneg); 531 phylink_set(mask, Asym_Pause); 532 533 phylink_set(mask, 10baseT_Half); 534 phylink_set(mask, 10baseT_Full); 535 phylink_set(mask, 100baseT_Half); 536 phylink_set(mask, 100baseT_Full); 537 538 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && 539 (state->interface == PHY_INTERFACE_MODE_NA || 540 state->interface == PHY_INTERFACE_MODE_GMII || 541 state->interface == PHY_INTERFACE_MODE_SGMII || 542 phy_interface_mode_is_rgmii(state->interface))) { 543 phylink_set(mask, 1000baseT_Full); 544 phylink_set(mask, 1000baseX_Full); 545 546 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) 547 phylink_set(mask, 1000baseT_Half); 548 } 549 550 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 551 bitmap_and(state->advertising, state->advertising, mask, 552 __ETHTOOL_LINK_MODE_MASK_NBITS); 553 } 554 555 static void macb_mac_pcs_get_state(struct phylink_config *config, 556 struct phylink_link_state *state) 557 { 558 state->link = 0; 559 } 560 561 static void macb_mac_an_restart(struct phylink_config *config) 562 { 563 /* Not supported */ 564 } 565 566 static void macb_mac_config(struct phylink_config *config, unsigned int mode, 567 const struct phylink_link_state *state) 568 { 569 struct net_device *ndev = to_net_dev(config->dev); 570 struct macb *bp = netdev_priv(ndev); 571 unsigned long flags; 572 u32 old_ctrl, ctrl; 573 574 spin_lock_irqsave(&bp->lock, flags); 575 576 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); 577 578 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { 579 if (state->interface == PHY_INTERFACE_MODE_RMII) 580 ctrl |= MACB_BIT(RM9200_RMII); 581 } else { 582 ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); 583 584 if (state->interface == PHY_INTERFACE_MODE_SGMII) 585 ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 586 } 587 588 /* Apply the new configuration, if any */ 589 if (old_ctrl ^ ctrl) 590 macb_or_gem_writel(bp, NCFGR, ctrl); 591 592 spin_unlock_irqrestore(&bp->lock, flags); 593 } 594 595 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, 596 phy_interface_t interface) 597 { 598 struct net_device *ndev = to_net_dev(config->dev); 599 struct macb *bp = netdev_priv(ndev); 600 struct macb_queue *queue; 601 unsigned int q; 602 u32 ctrl; 603 604 if 
(!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 605 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 606 queue_writel(queue, IDR, 607 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 608 609 /* Disable Rx and Tx */ 610 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); 611 macb_writel(bp, NCR, ctrl); 612 613 netif_tx_stop_all_queues(ndev); 614 } 615 616 static void macb_mac_link_up(struct phylink_config *config, 617 struct phy_device *phy, 618 unsigned int mode, phy_interface_t interface, 619 int speed, int duplex, 620 bool tx_pause, bool rx_pause) 621 { 622 struct net_device *ndev = to_net_dev(config->dev); 623 struct macb *bp = netdev_priv(ndev); 624 struct macb_queue *queue; 625 unsigned long flags; 626 unsigned int q; 627 u32 ctrl; 628 629 spin_lock_irqsave(&bp->lock, flags); 630 631 ctrl = macb_or_gem_readl(bp, NCFGR); 632 633 ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 634 635 if (speed == SPEED_100) 636 ctrl |= MACB_BIT(SPD); 637 638 if (duplex) 639 ctrl |= MACB_BIT(FD); 640 641 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 642 ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE)); 643 644 if (speed == SPEED_1000) 645 ctrl |= GEM_BIT(GBE); 646 647 /* We do not support MLO_PAUSE_RX yet */ 648 if (tx_pause) 649 ctrl |= MACB_BIT(PAE); 650 651 macb_set_tx_clk(bp->tx_clk, speed, ndev); 652 653 /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down 654 * cleared the pipeline and control registers. 655 */ 656 bp->macbgem_ops.mog_init_rings(bp); 657 macb_init_buffers(bp); 658 659 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 660 queue_writel(queue, IER, 661 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 662 } 663 664 macb_or_gem_writel(bp, NCFGR, ctrl); 665 666 spin_unlock_irqrestore(&bp->lock, flags); 667 668 /* Enable Rx and Tx */ 669 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); 670 671 netif_tx_wake_all_queues(ndev); 672 } 673 674 static const struct phylink_mac_ops macb_phylink_ops = { 675 .validate = macb_validate, 676 .mac_pcs_get_state = macb_mac_pcs_get_state, 677 .mac_an_restart = macb_mac_an_restart, 678 .mac_config = macb_mac_config, 679 .mac_link_down = macb_mac_link_down, 680 .mac_link_up = macb_mac_link_up, 681 }; 682 683 static bool macb_phy_handle_exists(struct device_node *dn) 684 { 685 dn = of_parse_phandle(dn, "phy-handle", 0); 686 of_node_put(dn); 687 return dn != NULL; 688 } 689 690 static int macb_phylink_connect(struct macb *bp) 691 { 692 struct device_node *dn = bp->pdev->dev.of_node; 693 struct net_device *dev = bp->dev; 694 struct phy_device *phydev; 695 int ret; 696 697 if (dn) 698 ret = phylink_of_phy_connect(bp->phylink, dn, 0); 699 700 if (!dn || (ret && !macb_phy_handle_exists(dn))) { 701 phydev = phy_find_first(bp->mii_bus); 702 if (!phydev) { 703 netdev_err(dev, "no PHY found\n"); 704 return -ENXIO; 705 } 706 707 /* attach the mac to the phy */ 708 ret = phylink_connect_phy(bp->phylink, phydev); 709 } 710 711 if (ret) { 712 netdev_err(dev, "Could not attach PHY (%d)\n", ret); 713 return ret; 714 } 715 716 phylink_start(bp->phylink); 717 718 return 0; 719 } 720 721 /* based on au1000_eth. 
c*/ 722 static int macb_mii_probe(struct net_device *dev) 723 { 724 struct macb *bp = netdev_priv(dev); 725 726 bp->phylink_config.dev = &dev->dev; 727 bp->phylink_config.type = PHYLINK_NETDEV; 728 729 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, 730 bp->phy_interface, &macb_phylink_ops); 731 if (IS_ERR(bp->phylink)) { 732 netdev_err(dev, "Could not create a phylink instance (%ld)\n", 733 PTR_ERR(bp->phylink)); 734 return PTR_ERR(bp->phylink); 735 } 736 737 return 0; 738 } 739 740 static int macb_mdiobus_register(struct macb *bp) 741 { 742 struct device_node *child, *np = bp->pdev->dev.of_node; 743 744 if (of_phy_is_fixed_link(np)) 745 return mdiobus_register(bp->mii_bus); 746 747 /* Only create the PHY from the device tree if at least one PHY is 748 * described. Otherwise scan the entire MDIO bus. We do this to support 749 * old device tree that did not follow the best practices and did not 750 * describe their network PHYs. 751 */ 752 for_each_available_child_of_node(np, child) 753 if (of_mdiobus_child_is_phy(child)) { 754 /* The loop increments the child refcount, 755 * decrement it before returning. 756 */ 757 of_node_put(child); 758 759 return of_mdiobus_register(bp->mii_bus, np); 760 } 761 762 return mdiobus_register(bp->mii_bus); 763 } 764 765 static int macb_mii_init(struct macb *bp) 766 { 767 int err = -ENXIO; 768 769 /* Enable management port */ 770 macb_writel(bp, NCR, MACB_BIT(MPE)); 771 772 bp->mii_bus = mdiobus_alloc(); 773 if (!bp->mii_bus) { 774 err = -ENOMEM; 775 goto err_out; 776 } 777 778 bp->mii_bus->name = "MACB_mii_bus"; 779 bp->mii_bus->read = &macb_mdio_read; 780 bp->mii_bus->write = &macb_mdio_write; 781 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 782 bp->pdev->name, bp->pdev->id); 783 bp->mii_bus->priv = bp; 784 bp->mii_bus->parent = &bp->pdev->dev; 785 786 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 787 788 err = macb_mdiobus_register(bp); 789 if (err) 790 goto err_out_free_mdiobus; 791 792 err = macb_mii_probe(bp->dev); 793 if (err) 794 goto err_out_unregister_bus; 795 796 return 0; 797 798 err_out_unregister_bus: 799 mdiobus_unregister(bp->mii_bus); 800 err_out_free_mdiobus: 801 mdiobus_free(bp->mii_bus); 802 err_out: 803 return err; 804 } 805 806 static void macb_update_stats(struct macb *bp) 807 { 808 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 809 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 810 int offset = MACB_PFR; 811 812 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 813 814 for (; p < end; p++, offset += 4) 815 *p += bp->macb_reg_readl(bp, offset); 816 } 817 818 static int macb_halt_tx(struct macb *bp) 819 { 820 unsigned long halt_time, timeout; 821 u32 status; 822 823 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); 824 825 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); 826 do { 827 halt_time = jiffies; 828 status = macb_readl(bp, TSR); 829 if (!(status & MACB_BIT(TGO))) 830 return 0; 831 832 udelay(250); 833 } while (time_before(halt_time, timeout)); 834 835 return -ETIMEDOUT; 836 } 837 838 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) 839 { 840 if (tx_skb->mapping) { 841 if (tx_skb->mapped_as_page) 842 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, 843 tx_skb->size, DMA_TO_DEVICE); 844 else 845 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 846 tx_skb->size, DMA_TO_DEVICE); 847 tx_skb->mapping = 0; 848 } 849 850 if (tx_skb->skb) { 851 dev_kfree_skb_any(tx_skb->skb); 852 tx_skb->skb = NULL; 853 } 854 } 855 856 static void 
macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 857 { 858 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 859 struct macb_dma_desc_64 *desc_64; 860 861 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 862 desc_64 = macb_64b_desc(bp, desc); 863 desc_64->addrh = upper_32_bits(addr); 864 /* The low bits of RX address contain the RX_USED bit, clearing 865 * of which allows packet RX. Make sure the high bits are also 866 * visible to HW at that point. 867 */ 868 dma_wmb(); 869 } 870 #endif 871 desc->addr = lower_32_bits(addr); 872 } 873 874 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 875 { 876 dma_addr_t addr = 0; 877 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 878 struct macb_dma_desc_64 *desc_64; 879 880 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 881 desc_64 = macb_64b_desc(bp, desc); 882 addr = ((u64)(desc_64->addrh) << 32); 883 } 884 #endif 885 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 886 return addr; 887 } 888 889 static void macb_tx_error_task(struct work_struct *work) 890 { 891 struct macb_queue *queue = container_of(work, struct macb_queue, 892 tx_error_task); 893 struct macb *bp = queue->bp; 894 struct macb_tx_skb *tx_skb; 895 struct macb_dma_desc *desc; 896 struct sk_buff *skb; 897 unsigned int tail; 898 unsigned long flags; 899 900 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 901 (unsigned int)(queue - bp->queues), 902 queue->tx_tail, queue->tx_head); 903 904 /* Prevent the queue IRQ handlers from running: each of them may call 905 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 906 * As explained below, we have to halt the transmission before updating 907 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 908 * network engine about the macb/gem being halted. 909 */ 910 spin_lock_irqsave(&bp->lock, flags); 911 912 /* Make sure nobody is trying to queue up new packets */ 913 netif_tx_stop_all_queues(bp->dev); 914 915 /* Stop transmission now 916 * (in case we have just queued new packets) 917 * macb/gem must be halted to write TBQP register 918 */ 919 if (macb_halt_tx(bp)) 920 /* Just complain for now, reinitializing TX path can be good */ 921 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 922 923 /* Treat frames in TX queue including the ones that caused the error. 924 * Free transmit buffers in upper layer. 925 */ 926 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 927 u32 ctrl; 928 929 desc = macb_tx_desc(queue, tail); 930 ctrl = desc->ctrl; 931 tx_skb = macb_tx_skb(queue, tail); 932 skb = tx_skb->skb; 933 934 if (ctrl & MACB_BIT(TX_USED)) { 935 /* skb is set for the last buffer of the frame */ 936 while (!skb) { 937 macb_tx_unmap(bp, tx_skb); 938 tail++; 939 tx_skb = macb_tx_skb(queue, tail); 940 skb = tx_skb->skb; 941 } 942 943 /* ctrl still refers to the first buffer descriptor 944 * since it's the only one written back by the hardware 945 */ 946 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 947 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 948 macb_tx_ring_wrap(bp, tail), 949 skb->data); 950 bp->dev->stats.tx_packets++; 951 queue->stats.tx_packets++; 952 bp->dev->stats.tx_bytes += skb->len; 953 queue->stats.tx_bytes += skb->len; 954 } 955 } else { 956 /* "Buffers exhausted mid-frame" errors may only happen 957 * if the driver is buggy, so complain loudly about 958 * those. Statistics are updated by hardware. 
959 */ 960 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 961 netdev_err(bp->dev, 962 "BUG: TX buffers exhausted mid-frame\n"); 963 964 desc->ctrl = ctrl | MACB_BIT(TX_USED); 965 } 966 967 macb_tx_unmap(bp, tx_skb); 968 } 969 970 /* Set end of TX queue */ 971 desc = macb_tx_desc(queue, 0); 972 macb_set_addr(bp, desc, 0); 973 desc->ctrl = MACB_BIT(TX_USED); 974 975 /* Make descriptor updates visible to hardware */ 976 wmb(); 977 978 /* Reinitialize the TX desc queue */ 979 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 980 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 981 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 982 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 983 #endif 984 /* Make TX ring reflect state of hardware */ 985 queue->tx_head = 0; 986 queue->tx_tail = 0; 987 988 /* Housework before enabling TX IRQ */ 989 macb_writel(bp, TSR, macb_readl(bp, TSR)); 990 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 991 992 /* Now we are ready to start transmission again */ 993 netif_tx_start_all_queues(bp->dev); 994 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 995 996 spin_unlock_irqrestore(&bp->lock, flags); 997 } 998 999 static void macb_tx_interrupt(struct macb_queue *queue) 1000 { 1001 unsigned int tail; 1002 unsigned int head; 1003 u32 status; 1004 struct macb *bp = queue->bp; 1005 u16 queue_index = queue - bp->queues; 1006 1007 status = macb_readl(bp, TSR); 1008 macb_writel(bp, TSR, status); 1009 1010 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1011 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 1012 1013 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 1014 (unsigned long)status); 1015 1016 head = queue->tx_head; 1017 for (tail = queue->tx_tail; tail != head; tail++) { 1018 struct macb_tx_skb *tx_skb; 1019 struct sk_buff *skb; 1020 struct macb_dma_desc *desc; 1021 u32 ctrl; 1022 1023 desc = macb_tx_desc(queue, tail); 1024 1025 /* Make hw descriptor updates visible to CPU */ 1026 rmb(); 1027 1028 ctrl = desc->ctrl; 1029 1030 /* TX_USED bit is only set by hardware on the very first buffer 1031 * descriptor of the transmitted frame. 1032 */ 1033 if (!(ctrl & MACB_BIT(TX_USED))) 1034 break; 1035 1036 /* Process all buffers of the current transmitted frame */ 1037 for (;; tail++) { 1038 tx_skb = macb_tx_skb(queue, tail); 1039 skb = tx_skb->skb; 1040 1041 /* First, update TX stats if needed */ 1042 if (skb) { 1043 if (unlikely(skb_shinfo(skb)->tx_flags & 1044 SKBTX_HW_TSTAMP) && 1045 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 1046 /* skb now belongs to timestamp buffer 1047 * and will be removed later 1048 */ 1049 tx_skb->skb = NULL; 1050 } 1051 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 1052 macb_tx_ring_wrap(bp, tail), 1053 skb->data); 1054 bp->dev->stats.tx_packets++; 1055 queue->stats.tx_packets++; 1056 bp->dev->stats.tx_bytes += skb->len; 1057 queue->stats.tx_bytes += skb->len; 1058 } 1059 1060 /* Now we can safely release resources */ 1061 macb_tx_unmap(bp, tx_skb); 1062 1063 /* skb is set only for the last buffer of the frame. 1064 * WARNING: at this point skb has been freed by 1065 * macb_tx_unmap(). 
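			 * Only the local skb pointer is used below, as a flag
			 * marking the end of the frame; it must not be
			 * dereferenced at this point.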
1066 */ 1067 if (skb) 1068 break; 1069 } 1070 } 1071 1072 queue->tx_tail = tail; 1073 if (__netif_subqueue_stopped(bp->dev, queue_index) && 1074 CIRC_CNT(queue->tx_head, queue->tx_tail, 1075 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 1076 netif_wake_subqueue(bp->dev, queue_index); 1077 } 1078 1079 static void gem_rx_refill(struct macb_queue *queue) 1080 { 1081 unsigned int entry; 1082 struct sk_buff *skb; 1083 dma_addr_t paddr; 1084 struct macb *bp = queue->bp; 1085 struct macb_dma_desc *desc; 1086 1087 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 1088 bp->rx_ring_size) > 0) { 1089 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 1090 1091 /* Make hw descriptor updates visible to CPU */ 1092 rmb(); 1093 1094 queue->rx_prepared_head++; 1095 desc = macb_rx_desc(queue, entry); 1096 1097 if (!queue->rx_skbuff[entry]) { 1098 /* allocate sk_buff for this free entry in ring */ 1099 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 1100 if (unlikely(!skb)) { 1101 netdev_err(bp->dev, 1102 "Unable to allocate sk_buff\n"); 1103 break; 1104 } 1105 1106 /* now fill corresponding descriptor entry */ 1107 paddr = dma_map_single(&bp->pdev->dev, skb->data, 1108 bp->rx_buffer_size, 1109 DMA_FROM_DEVICE); 1110 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 1111 dev_kfree_skb(skb); 1112 break; 1113 } 1114 1115 queue->rx_skbuff[entry] = skb; 1116 1117 if (entry == bp->rx_ring_size - 1) 1118 paddr |= MACB_BIT(RX_WRAP); 1119 desc->ctrl = 0; 1120 /* Setting addr clears RX_USED and allows reception, 1121 * make sure ctrl is cleared first to avoid a race. 1122 */ 1123 dma_wmb(); 1124 macb_set_addr(bp, desc, paddr); 1125 1126 /* properly align Ethernet header */ 1127 skb_reserve(skb, NET_IP_ALIGN); 1128 } else { 1129 desc->ctrl = 0; 1130 dma_wmb(); 1131 desc->addr &= ~MACB_BIT(RX_USED); 1132 } 1133 } 1134 1135 /* Make descriptor updates visible to hardware */ 1136 wmb(); 1137 1138 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 1139 queue, queue->rx_prepared_head, queue->rx_tail); 1140 } 1141 1142 /* Mark DMA descriptors from begin up to and not including end as unused */ 1143 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 1144 unsigned int end) 1145 { 1146 unsigned int frag; 1147 1148 for (frag = begin; frag != end; frag++) { 1149 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 1150 1151 desc->addr &= ~MACB_BIT(RX_USED); 1152 } 1153 1154 /* Make descriptor updates visible to hardware */ 1155 wmb(); 1156 1157 /* When this happens, the hardware stats registers for 1158 * whatever caused this is updated, so we don't have to record 1159 * anything. 1160 */ 1161 } 1162 1163 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi, 1164 int budget) 1165 { 1166 struct macb *bp = queue->bp; 1167 unsigned int len; 1168 unsigned int entry; 1169 struct sk_buff *skb; 1170 struct macb_dma_desc *desc; 1171 int count = 0; 1172 1173 while (count < budget) { 1174 u32 ctrl; 1175 dma_addr_t addr; 1176 bool rxused; 1177 1178 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 1179 desc = macb_rx_desc(queue, entry); 1180 1181 /* Make hw descriptor updates visible to CPU */ 1182 rmb(); 1183 1184 rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; 1185 addr = macb_get_addr(bp, desc); 1186 1187 if (!rxused) 1188 break; 1189 1190 /* Ensure ctrl is at least as up-to-date as rxused */ 1191 dma_rmb(); 1192 1193 ctrl = desc->ctrl; 1194 1195 queue->rx_tail++; 1196 count++; 1197 1198 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1199 netdev_err(bp->dev, 1200 "not whole frame pointed by descriptor\n"); 1201 bp->dev->stats.rx_dropped++; 1202 queue->stats.rx_dropped++; 1203 break; 1204 } 1205 skb = queue->rx_skbuff[entry]; 1206 if (unlikely(!skb)) { 1207 netdev_err(bp->dev, 1208 "inconsistent Rx descriptor chain\n"); 1209 bp->dev->stats.rx_dropped++; 1210 queue->stats.rx_dropped++; 1211 break; 1212 } 1213 /* now everything is ready for receiving packet */ 1214 queue->rx_skbuff[entry] = NULL; 1215 len = ctrl & bp->rx_frm_len_mask; 1216 1217 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1218 1219 skb_put(skb, len); 1220 dma_unmap_single(&bp->pdev->dev, addr, 1221 bp->rx_buffer_size, DMA_FROM_DEVICE); 1222 1223 skb->protocol = eth_type_trans(skb, bp->dev); 1224 skb_checksum_none_assert(skb); 1225 if (bp->dev->features & NETIF_F_RXCSUM && 1226 !(bp->dev->flags & IFF_PROMISC) && 1227 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1228 skb->ip_summed = CHECKSUM_UNNECESSARY; 1229 1230 bp->dev->stats.rx_packets++; 1231 queue->stats.rx_packets++; 1232 bp->dev->stats.rx_bytes += skb->len; 1233 queue->stats.rx_bytes += skb->len; 1234 1235 gem_ptp_do_rxstamp(bp, skb, desc); 1236 1237 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1238 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1239 skb->len, skb->csum); 1240 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1241 skb_mac_header(skb), 16, true); 1242 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1243 skb->data, 32, true); 1244 #endif 1245 1246 napi_gro_receive(napi, skb); 1247 } 1248 1249 gem_rx_refill(queue); 1250 1251 return count; 1252 } 1253 1254 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, 1255 unsigned int first_frag, unsigned int last_frag) 1256 { 1257 unsigned int len; 1258 unsigned int frag; 1259 unsigned int offset; 1260 struct sk_buff *skb; 1261 struct macb_dma_desc *desc; 1262 struct macb *bp = queue->bp; 1263 1264 desc = macb_rx_desc(queue, last_frag); 1265 len = desc->ctrl & bp->rx_frm_len_mask; 1266 1267 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1268 macb_rx_ring_wrap(bp, first_frag), 1269 macb_rx_ring_wrap(bp, last_frag), len); 1270 1271 /* The ethernet header starts NET_IP_ALIGN bytes into the 1272 * first buffer. Since the header is 14 bytes, this makes the 1273 * payload word-aligned. 1274 * 1275 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1276 * the two padding bytes into the skb so that we avoid hitting 1277 * the slowpath in memcpy(), and pull them off afterwards. 
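	 *
	 * (len is bumped by NET_IP_ALIGN and the copy below starts at
	 * offset 0; __skb_pull(skb, NET_IP_ALIGN) at the end removes the two
	 * padding bytes again before the skb is handed to the stack.)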
1278 */ 1279 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1280 if (!skb) { 1281 bp->dev->stats.rx_dropped++; 1282 for (frag = first_frag; ; frag++) { 1283 desc = macb_rx_desc(queue, frag); 1284 desc->addr &= ~MACB_BIT(RX_USED); 1285 if (frag == last_frag) 1286 break; 1287 } 1288 1289 /* Make descriptor updates visible to hardware */ 1290 wmb(); 1291 1292 return 1; 1293 } 1294 1295 offset = 0; 1296 len += NET_IP_ALIGN; 1297 skb_checksum_none_assert(skb); 1298 skb_put(skb, len); 1299 1300 for (frag = first_frag; ; frag++) { 1301 unsigned int frag_len = bp->rx_buffer_size; 1302 1303 if (offset + frag_len > len) { 1304 if (unlikely(frag != last_frag)) { 1305 dev_kfree_skb_any(skb); 1306 return -1; 1307 } 1308 frag_len = len - offset; 1309 } 1310 skb_copy_to_linear_data_offset(skb, offset, 1311 macb_rx_buffer(queue, frag), 1312 frag_len); 1313 offset += bp->rx_buffer_size; 1314 desc = macb_rx_desc(queue, frag); 1315 desc->addr &= ~MACB_BIT(RX_USED); 1316 1317 if (frag == last_frag) 1318 break; 1319 } 1320 1321 /* Make descriptor updates visible to hardware */ 1322 wmb(); 1323 1324 __skb_pull(skb, NET_IP_ALIGN); 1325 skb->protocol = eth_type_trans(skb, bp->dev); 1326 1327 bp->dev->stats.rx_packets++; 1328 bp->dev->stats.rx_bytes += skb->len; 1329 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1330 skb->len, skb->csum); 1331 napi_gro_receive(napi, skb); 1332 1333 return 0; 1334 } 1335 1336 static inline void macb_init_rx_ring(struct macb_queue *queue) 1337 { 1338 struct macb *bp = queue->bp; 1339 dma_addr_t addr; 1340 struct macb_dma_desc *desc = NULL; 1341 int i; 1342 1343 addr = queue->rx_buffers_dma; 1344 for (i = 0; i < bp->rx_ring_size; i++) { 1345 desc = macb_rx_desc(queue, i); 1346 macb_set_addr(bp, desc, addr); 1347 desc->ctrl = 0; 1348 addr += bp->rx_buffer_size; 1349 } 1350 desc->addr |= MACB_BIT(RX_WRAP); 1351 queue->rx_tail = 0; 1352 } 1353 1354 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, 1355 int budget) 1356 { 1357 struct macb *bp = queue->bp; 1358 bool reset_rx_queue = false; 1359 int received = 0; 1360 unsigned int tail; 1361 int first_frag = -1; 1362 1363 for (tail = queue->rx_tail; budget > 0; tail++) { 1364 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1365 u32 ctrl; 1366 1367 /* Make hw descriptor updates visible to CPU */ 1368 rmb(); 1369 1370 if (!(desc->addr & MACB_BIT(RX_USED))) 1371 break; 1372 1373 /* Ensure ctrl is at least as up-to-date as addr */ 1374 dma_rmb(); 1375 1376 ctrl = desc->ctrl; 1377 1378 if (ctrl & MACB_BIT(RX_SOF)) { 1379 if (first_frag != -1) 1380 discard_partial_frame(queue, first_frag, tail); 1381 first_frag = tail; 1382 } 1383 1384 if (ctrl & MACB_BIT(RX_EOF)) { 1385 int dropped; 1386 1387 if (unlikely(first_frag == -1)) { 1388 reset_rx_queue = true; 1389 continue; 1390 } 1391 1392 dropped = macb_rx_frame(queue, napi, first_frag, tail); 1393 first_frag = -1; 1394 if (unlikely(dropped < 0)) { 1395 reset_rx_queue = true; 1396 continue; 1397 } 1398 if (!dropped) { 1399 received++; 1400 budget--; 1401 } 1402 } 1403 } 1404 1405 if (unlikely(reset_rx_queue)) { 1406 unsigned long flags; 1407 u32 ctrl; 1408 1409 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1410 1411 spin_lock_irqsave(&bp->lock, flags); 1412 1413 ctrl = macb_readl(bp, NCR); 1414 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1415 1416 macb_init_rx_ring(queue); 1417 queue_writel(queue, RBQP, queue->rx_ring_dma); 1418 1419 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1420 1421 spin_unlock_irqrestore(&bp->lock, flags); 1422 
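		/* The RX ring has just been re-initialised; only report the
		 * frames that completed before the corruption was detected.
		 */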
return received; 1423 } 1424 1425 if (first_frag != -1) 1426 queue->rx_tail = first_frag; 1427 else 1428 queue->rx_tail = tail; 1429 1430 return received; 1431 } 1432 1433 static int macb_poll(struct napi_struct *napi, int budget) 1434 { 1435 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1436 struct macb *bp = queue->bp; 1437 int work_done; 1438 u32 status; 1439 1440 status = macb_readl(bp, RSR); 1441 macb_writel(bp, RSR, status); 1442 1443 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1444 (unsigned long)status, budget); 1445 1446 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); 1447 if (work_done < budget) { 1448 napi_complete_done(napi, work_done); 1449 1450 /* Packets received while interrupts were disabled */ 1451 status = macb_readl(bp, RSR); 1452 if (status) { 1453 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1454 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1455 napi_reschedule(napi); 1456 } else { 1457 queue_writel(queue, IER, bp->rx_intr_mask); 1458 } 1459 } 1460 1461 /* TODO: Handle errors */ 1462 1463 return work_done; 1464 } 1465 1466 static void macb_hresp_error_task(unsigned long data) 1467 { 1468 struct macb *bp = (struct macb *)data; 1469 struct net_device *dev = bp->dev; 1470 struct macb_queue *queue; 1471 unsigned int q; 1472 u32 ctrl; 1473 1474 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1475 queue_writel(queue, IDR, bp->rx_intr_mask | 1476 MACB_TX_INT_FLAGS | 1477 MACB_BIT(HRESP)); 1478 } 1479 ctrl = macb_readl(bp, NCR); 1480 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1481 macb_writel(bp, NCR, ctrl); 1482 1483 netif_tx_stop_all_queues(dev); 1484 netif_carrier_off(dev); 1485 1486 bp->macbgem_ops.mog_init_rings(bp); 1487 1488 /* Initialize TX and RX buffers */ 1489 macb_init_buffers(bp); 1490 1491 /* Enable interrupts */ 1492 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1493 queue_writel(queue, IER, 1494 bp->rx_intr_mask | 1495 MACB_TX_INT_FLAGS | 1496 MACB_BIT(HRESP)); 1497 1498 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1499 macb_writel(bp, NCR, ctrl); 1500 1501 netif_carrier_on(dev); 1502 netif_tx_start_all_queues(dev); 1503 } 1504 1505 static void macb_tx_restart(struct macb_queue *queue) 1506 { 1507 unsigned int head = queue->tx_head; 1508 unsigned int tail = queue->tx_tail; 1509 struct macb *bp = queue->bp; 1510 1511 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1512 queue_writel(queue, ISR, MACB_BIT(TXUBR)); 1513 1514 if (head == tail) 1515 return; 1516 1517 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1518 } 1519 1520 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1521 { 1522 struct macb_queue *queue = dev_id; 1523 struct macb *bp = queue->bp; 1524 struct net_device *dev = bp->dev; 1525 u32 status, ctrl; 1526 1527 status = queue_readl(queue, ISR); 1528 1529 if (unlikely(!status)) 1530 return IRQ_NONE; 1531 1532 spin_lock(&bp->lock); 1533 1534 while (status) { 1535 /* close possible race with dev_close */ 1536 if (unlikely(!netif_running(dev))) { 1537 queue_writel(queue, IDR, -1); 1538 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1539 queue_writel(queue, ISR, -1); 1540 break; 1541 } 1542 1543 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1544 (unsigned int)(queue - bp->queues), 1545 (unsigned long)status); 1546 1547 if (status & bp->rx_intr_mask) { 1548 /* There's no point taking any more interrupts 1549 * until we have processed the buffers. 
The 1550 * scheduling call may fail if the poll routine 1551 * is already scheduled, so disable interrupts 1552 * now. 1553 */ 1554 queue_writel(queue, IDR, bp->rx_intr_mask); 1555 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1556 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1557 1558 if (napi_schedule_prep(&queue->napi)) { 1559 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1560 __napi_schedule(&queue->napi); 1561 } 1562 } 1563 1564 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1565 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1566 schedule_work(&queue->tx_error_task); 1567 1568 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1569 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1570 1571 break; 1572 } 1573 1574 if (status & MACB_BIT(TCOMP)) 1575 macb_tx_interrupt(queue); 1576 1577 if (status & MACB_BIT(TXUBR)) 1578 macb_tx_restart(queue); 1579 1580 /* Link change detection isn't possible with RMII, so we'll 1581 * add that if/when we get our hands on a full-blown MII PHY. 1582 */ 1583 1584 /* There is a hardware issue under heavy load where DMA can 1585 * stop, this causes endless "used buffer descriptor read" 1586 * interrupts but it can be cleared by re-enabling RX. See 1587 * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1588 * section 16.7.4 for details. RXUBR is only enabled for 1589 * these two versions. 1590 */ 1591 if (status & MACB_BIT(RXUBR)) { 1592 ctrl = macb_readl(bp, NCR); 1593 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1594 wmb(); 1595 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1596 1597 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1598 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1599 } 1600 1601 if (status & MACB_BIT(ISR_ROVR)) { 1602 /* We missed at least one packet */ 1603 if (macb_is_gem(bp)) 1604 bp->hw_stats.gem.rx_overruns++; 1605 else 1606 bp->hw_stats.macb.rx_overruns++; 1607 1608 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1609 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1610 } 1611 1612 if (status & MACB_BIT(HRESP)) { 1613 tasklet_schedule(&bp->hresp_err_tasklet); 1614 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1615 1616 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1617 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1618 } 1619 status = queue_readl(queue, ISR); 1620 } 1621 1622 spin_unlock(&bp->lock); 1623 1624 return IRQ_HANDLED; 1625 } 1626 1627 #ifdef CONFIG_NET_POLL_CONTROLLER 1628 /* Polling receive - used by netconsole and other diagnostic tools 1629 * to allow network i/o with interrupts disabled. 
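 * Each queue's interrupt handler is simply invoked in turn with local
 * interrupts masked.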
1630 */ 1631 static void macb_poll_controller(struct net_device *dev) 1632 { 1633 struct macb *bp = netdev_priv(dev); 1634 struct macb_queue *queue; 1635 unsigned long flags; 1636 unsigned int q; 1637 1638 local_irq_save(flags); 1639 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1640 macb_interrupt(dev->irq, queue); 1641 local_irq_restore(flags); 1642 } 1643 #endif 1644 1645 static unsigned int macb_tx_map(struct macb *bp, 1646 struct macb_queue *queue, 1647 struct sk_buff *skb, 1648 unsigned int hdrlen) 1649 { 1650 dma_addr_t mapping; 1651 unsigned int len, entry, i, tx_head = queue->tx_head; 1652 struct macb_tx_skb *tx_skb = NULL; 1653 struct macb_dma_desc *desc; 1654 unsigned int offset, size, count = 0; 1655 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1656 unsigned int eof = 1, mss_mfs = 0; 1657 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1658 1659 /* LSO */ 1660 if (skb_shinfo(skb)->gso_size != 0) { 1661 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1662 /* UDP - UFO */ 1663 lso_ctrl = MACB_LSO_UFO_ENABLE; 1664 else 1665 /* TCP - TSO */ 1666 lso_ctrl = MACB_LSO_TSO_ENABLE; 1667 } 1668 1669 /* First, map non-paged data */ 1670 len = skb_headlen(skb); 1671 1672 /* first buffer length */ 1673 size = hdrlen; 1674 1675 offset = 0; 1676 while (len) { 1677 entry = macb_tx_ring_wrap(bp, tx_head); 1678 tx_skb = &queue->tx_skb[entry]; 1679 1680 mapping = dma_map_single(&bp->pdev->dev, 1681 skb->data + offset, 1682 size, DMA_TO_DEVICE); 1683 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1684 goto dma_error; 1685 1686 /* Save info to properly release resources */ 1687 tx_skb->skb = NULL; 1688 tx_skb->mapping = mapping; 1689 tx_skb->size = size; 1690 tx_skb->mapped_as_page = false; 1691 1692 len -= size; 1693 offset += size; 1694 count++; 1695 tx_head++; 1696 1697 size = min(len, bp->max_tx_length); 1698 } 1699 1700 /* Then, map paged data from fragments */ 1701 for (f = 0; f < nr_frags; f++) { 1702 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1703 1704 len = skb_frag_size(frag); 1705 offset = 0; 1706 while (len) { 1707 size = min(len, bp->max_tx_length); 1708 entry = macb_tx_ring_wrap(bp, tx_head); 1709 tx_skb = &queue->tx_skb[entry]; 1710 1711 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1712 offset, size, DMA_TO_DEVICE); 1713 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1714 goto dma_error; 1715 1716 /* Save info to properly release resources */ 1717 tx_skb->skb = NULL; 1718 tx_skb->mapping = mapping; 1719 tx_skb->size = size; 1720 tx_skb->mapped_as_page = true; 1721 1722 len -= size; 1723 offset += size; 1724 count++; 1725 tx_head++; 1726 } 1727 } 1728 1729 /* Should never happen */ 1730 if (unlikely(!tx_skb)) { 1731 netdev_err(bp->dev, "BUG! 
empty skb!\n"); 1732 return 0; 1733 } 1734 1735 /* This is the last buffer of the frame: save socket buffer */ 1736 tx_skb->skb = skb; 1737 1738 /* Update TX ring: update buffer descriptors in reverse order 1739 * to avoid race condition 1740 */ 1741 1742 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1743 * to set the end of TX queue 1744 */ 1745 i = tx_head; 1746 entry = macb_tx_ring_wrap(bp, i); 1747 ctrl = MACB_BIT(TX_USED); 1748 desc = macb_tx_desc(queue, entry); 1749 desc->ctrl = ctrl; 1750 1751 if (lso_ctrl) { 1752 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1753 /* include header and FCS in value given to h/w */ 1754 mss_mfs = skb_shinfo(skb)->gso_size + 1755 skb_transport_offset(skb) + 1756 ETH_FCS_LEN; 1757 else /* TSO */ { 1758 mss_mfs = skb_shinfo(skb)->gso_size; 1759 /* TCP Sequence Number Source Select 1760 * can be set only for TSO 1761 */ 1762 seq_ctrl = 0; 1763 } 1764 } 1765 1766 do { 1767 i--; 1768 entry = macb_tx_ring_wrap(bp, i); 1769 tx_skb = &queue->tx_skb[entry]; 1770 desc = macb_tx_desc(queue, entry); 1771 1772 ctrl = (u32)tx_skb->size; 1773 if (eof) { 1774 ctrl |= MACB_BIT(TX_LAST); 1775 eof = 0; 1776 } 1777 if (unlikely(entry == (bp->tx_ring_size - 1))) 1778 ctrl |= MACB_BIT(TX_WRAP); 1779 1780 /* First descriptor is header descriptor */ 1781 if (i == queue->tx_head) { 1782 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1783 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1784 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1785 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1786 ctrl |= MACB_BIT(TX_NOCRC); 1787 } else 1788 /* Only set MSS/MFS on payload descriptors 1789 * (second or later descriptor) 1790 */ 1791 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1792 1793 /* Set TX buffer descriptor */ 1794 macb_set_addr(bp, desc, tx_skb->mapping); 1795 /* desc->addr must be visible to hardware before clearing 1796 * 'TX_USED' bit in desc->ctrl. 1797 */ 1798 wmb(); 1799 desc->ctrl = ctrl; 1800 } while (i != queue->tx_head); 1801 1802 queue->tx_head = tx_head; 1803 1804 return count; 1805 1806 dma_error: 1807 netdev_err(bp->dev, "TX DMA map failed\n"); 1808 1809 for (i = queue->tx_head; i != tx_head; i++) { 1810 tx_skb = macb_tx_skb(queue, i); 1811 1812 macb_tx_unmap(bp, tx_skb); 1813 } 1814 1815 return 0; 1816 } 1817 1818 static netdev_features_t macb_features_check(struct sk_buff *skb, 1819 struct net_device *dev, 1820 netdev_features_t features) 1821 { 1822 unsigned int nr_frags, f; 1823 unsigned int hdrlen; 1824 1825 /* Validate LSO compatibility */ 1826 1827 /* there is only one buffer or protocol is not UDP */ 1828 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) 1829 return features; 1830 1831 /* length of header */ 1832 hdrlen = skb_transport_offset(skb); 1833 1834 /* For UFO only: 1835 * When software supplies two or more payload buffers all payload buffers 1836 * apart from the last must be a multiple of 8 bytes in size. 
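	 *
	 * For example, a first payload buffer of 1499 bytes fails the
	 * alignment check below; LSO is then masked out of the returned
	 * features and the stack falls back to software segmentation for
	 * that skb.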
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)	/* Not available for GSO */
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to the tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to the headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS.
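		 * (ETH_FCS_LEN is 4, so four extra bytes are reserved here for
		 * the CRC that is appended at add_fcs below.)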
*/ 1897 padlen += ETH_FCS_LEN; 1898 } 1899 1900 if (!cloned && headroom + tailroom >= padlen) { 1901 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 1902 skb_set_tail_pointer(*skb, (*skb)->len); 1903 } else { 1904 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 1905 if (!nskb) 1906 return -ENOMEM; 1907 1908 dev_consume_skb_any(*skb); 1909 *skb = nskb; 1910 } 1911 1912 if (padlen > ETH_FCS_LEN) 1913 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 1914 1915 add_fcs: 1916 /* set FCS to packet */ 1917 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 1918 fcs = ~fcs; 1919 1920 skb_put_u8(*skb, fcs & 0xff); 1921 skb_put_u8(*skb, (fcs >> 8) & 0xff); 1922 skb_put_u8(*skb, (fcs >> 16) & 0xff); 1923 skb_put_u8(*skb, (fcs >> 24) & 0xff); 1924 1925 return 0; 1926 } 1927 1928 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1929 { 1930 u16 queue_index = skb_get_queue_mapping(skb); 1931 struct macb *bp = netdev_priv(dev); 1932 struct macb_queue *queue = &bp->queues[queue_index]; 1933 unsigned long flags; 1934 unsigned int desc_cnt, nr_frags, frag_size, f; 1935 unsigned int hdrlen; 1936 bool is_lso; 1937 netdev_tx_t ret = NETDEV_TX_OK; 1938 1939 if (macb_clear_csum(skb)) { 1940 dev_kfree_skb_any(skb); 1941 return ret; 1942 } 1943 1944 if (macb_pad_and_fcs(&skb, dev)) { 1945 dev_kfree_skb_any(skb); 1946 return ret; 1947 } 1948 1949 is_lso = (skb_shinfo(skb)->gso_size != 0); 1950 1951 if (is_lso) { 1952 /* length of headers */ 1953 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1954 /* only queue eth + ip headers separately for UDP */ 1955 hdrlen = skb_transport_offset(skb); 1956 else 1957 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 1958 if (skb_headlen(skb) < hdrlen) { 1959 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 1960 /* if this is required, would need to copy to single buffer */ 1961 return NETDEV_TX_BUSY; 1962 } 1963 } else 1964 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 1965 1966 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1967 netdev_vdbg(bp->dev, 1968 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1969 queue_index, skb->len, skb->head, skb->data, 1970 skb_tail_pointer(skb), skb_end_pointer(skb)); 1971 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1972 skb->data, 16, true); 1973 #endif 1974 1975 /* Count how many TX buffer descriptors are needed to send this 1976 * socket buffer: skb fragments of jumbo frames may need to be 1977 * split into many buffer descriptors. 1978 */ 1979 if (is_lso && (skb_headlen(skb) > hdrlen)) 1980 /* extra header descriptor if also payload in first buffer */ 1981 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 1982 else 1983 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1984 nr_frags = skb_shinfo(skb)->nr_frags; 1985 for (f = 0; f < nr_frags; f++) { 1986 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1987 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 1988 } 1989 1990 spin_lock_irqsave(&bp->lock, flags); 1991 1992 /* This is a hard error, log it. 
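	 * desc_cnt above is the worst-case descriptor count for this skb
	 * (one descriptor per max_tx_length chunk of the linear data and of
	 * each fragment), so stop the queue if the ring cannot hold that many.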
*/ 1993 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 1994 bp->tx_ring_size) < desc_cnt) { 1995 netif_stop_subqueue(dev, queue_index); 1996 spin_unlock_irqrestore(&bp->lock, flags); 1997 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1998 queue->tx_head, queue->tx_tail); 1999 return NETDEV_TX_BUSY; 2000 } 2001 2002 /* Map socket buffer for DMA transfer */ 2003 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 2004 dev_kfree_skb_any(skb); 2005 goto unlock; 2006 } 2007 2008 /* Make newly initialized descriptor visible to hardware */ 2009 wmb(); 2010 skb_tx_timestamp(skb); 2011 2012 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 2013 2014 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 2015 netif_stop_subqueue(dev, queue_index); 2016 2017 unlock: 2018 spin_unlock_irqrestore(&bp->lock, flags); 2019 2020 return ret; 2021 } 2022 2023 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 2024 { 2025 if (!macb_is_gem(bp)) { 2026 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 2027 } else { 2028 bp->rx_buffer_size = size; 2029 2030 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 2031 netdev_dbg(bp->dev, 2032 "RX buffer must be multiple of %d bytes, expanding\n", 2033 RX_BUFFER_MULTIPLE); 2034 bp->rx_buffer_size = 2035 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 2036 } 2037 } 2038 2039 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 2040 bp->dev->mtu, bp->rx_buffer_size); 2041 } 2042 2043 static void gem_free_rx_buffers(struct macb *bp) 2044 { 2045 struct sk_buff *skb; 2046 struct macb_dma_desc *desc; 2047 struct macb_queue *queue; 2048 dma_addr_t addr; 2049 unsigned int q; 2050 int i; 2051 2052 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2053 if (!queue->rx_skbuff) 2054 continue; 2055 2056 for (i = 0; i < bp->rx_ring_size; i++) { 2057 skb = queue->rx_skbuff[i]; 2058 2059 if (!skb) 2060 continue; 2061 2062 desc = macb_rx_desc(queue, i); 2063 addr = macb_get_addr(bp, desc); 2064 2065 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 2066 DMA_FROM_DEVICE); 2067 dev_kfree_skb_any(skb); 2068 skb = NULL; 2069 } 2070 2071 kfree(queue->rx_skbuff); 2072 queue->rx_skbuff = NULL; 2073 } 2074 } 2075 2076 static void macb_free_rx_buffers(struct macb *bp) 2077 { 2078 struct macb_queue *queue = &bp->queues[0]; 2079 2080 if (queue->rx_buffers) { 2081 dma_free_coherent(&bp->pdev->dev, 2082 bp->rx_ring_size * bp->rx_buffer_size, 2083 queue->rx_buffers, queue->rx_buffers_dma); 2084 queue->rx_buffers = NULL; 2085 } 2086 } 2087 2088 static void macb_free_consistent(struct macb *bp) 2089 { 2090 struct macb_queue *queue; 2091 unsigned int q; 2092 int size; 2093 2094 bp->macbgem_ops.mog_free_rx_buffers(bp); 2095 2096 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2097 kfree(queue->tx_skb); 2098 queue->tx_skb = NULL; 2099 if (queue->tx_ring) { 2100 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2101 dma_free_coherent(&bp->pdev->dev, size, 2102 queue->tx_ring, queue->tx_ring_dma); 2103 queue->tx_ring = NULL; 2104 } 2105 if (queue->rx_ring) { 2106 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2107 dma_free_coherent(&bp->pdev->dev, size, 2108 queue->rx_ring, queue->rx_ring_dma); 2109 queue->rx_ring = NULL; 2110 } 2111 } 2112 } 2113 2114 static int gem_alloc_rx_buffers(struct macb *bp) 2115 { 2116 struct macb_queue *queue; 2117 unsigned int q; 2118 int size; 2119 2120 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2121 size = bp->rx_ring_size * sizeof(struct sk_buff *); 2122 queue->rx_skbuff 
= kzalloc(size, GFP_KERNEL); 2123 if (!queue->rx_skbuff) 2124 return -ENOMEM; 2125 else 2126 netdev_dbg(bp->dev, 2127 "Allocated %d RX struct sk_buff entries at %p\n", 2128 bp->rx_ring_size, queue->rx_skbuff); 2129 } 2130 return 0; 2131 } 2132 2133 static int macb_alloc_rx_buffers(struct macb *bp) 2134 { 2135 struct macb_queue *queue = &bp->queues[0]; 2136 int size; 2137 2138 size = bp->rx_ring_size * bp->rx_buffer_size; 2139 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 2140 &queue->rx_buffers_dma, GFP_KERNEL); 2141 if (!queue->rx_buffers) 2142 return -ENOMEM; 2143 2144 netdev_dbg(bp->dev, 2145 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 2146 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 2147 return 0; 2148 } 2149 2150 static int macb_alloc_consistent(struct macb *bp) 2151 { 2152 struct macb_queue *queue; 2153 unsigned int q; 2154 int size; 2155 2156 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2157 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2158 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2159 &queue->tx_ring_dma, 2160 GFP_KERNEL); 2161 if (!queue->tx_ring) 2162 goto out_err; 2163 netdev_dbg(bp->dev, 2164 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 2165 q, size, (unsigned long)queue->tx_ring_dma, 2166 queue->tx_ring); 2167 2168 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 2169 queue->tx_skb = kmalloc(size, GFP_KERNEL); 2170 if (!queue->tx_skb) 2171 goto out_err; 2172 2173 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2174 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2175 &queue->rx_ring_dma, GFP_KERNEL); 2176 if (!queue->rx_ring) 2177 goto out_err; 2178 netdev_dbg(bp->dev, 2179 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 2180 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 2181 } 2182 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 2183 goto out_err; 2184 2185 return 0; 2186 2187 out_err: 2188 macb_free_consistent(bp); 2189 return -ENOMEM; 2190 } 2191 2192 static void gem_init_rings(struct macb *bp) 2193 { 2194 struct macb_queue *queue; 2195 struct macb_dma_desc *desc = NULL; 2196 unsigned int q; 2197 int i; 2198 2199 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2200 for (i = 0; i < bp->tx_ring_size; i++) { 2201 desc = macb_tx_desc(queue, i); 2202 macb_set_addr(bp, desc, 0); 2203 desc->ctrl = MACB_BIT(TX_USED); 2204 } 2205 desc->ctrl |= MACB_BIT(TX_WRAP); 2206 queue->tx_head = 0; 2207 queue->tx_tail = 0; 2208 2209 queue->rx_tail = 0; 2210 queue->rx_prepared_head = 0; 2211 2212 gem_rx_refill(queue); 2213 } 2214 2215 } 2216 2217 static void macb_init_rings(struct macb *bp) 2218 { 2219 int i; 2220 struct macb_dma_desc *desc = NULL; 2221 2222 macb_init_rx_ring(&bp->queues[0]); 2223 2224 for (i = 0; i < bp->tx_ring_size; i++) { 2225 desc = macb_tx_desc(&bp->queues[0], i); 2226 macb_set_addr(bp, desc, 0); 2227 desc->ctrl = MACB_BIT(TX_USED); 2228 } 2229 bp->queues[0].tx_head = 0; 2230 bp->queues[0].tx_tail = 0; 2231 desc->ctrl |= MACB_BIT(TX_WRAP); 2232 } 2233 2234 static void macb_reset_hw(struct macb *bp) 2235 { 2236 struct macb_queue *queue; 2237 unsigned int q; 2238 u32 ctrl = macb_readl(bp, NCR); 2239 2240 /* Disable RX and TX (XXX: Should we halt the transmission 2241 * more gracefully?) 2242 */ 2243 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2244 2245 /* Clear the stats registers (XXX: Update stats first?) 
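 * Writing CLRSTAT to NCR below zeroes all hardware statistics counters.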
*/ 2246 ctrl |= MACB_BIT(CLRSTAT); 2247 2248 macb_writel(bp, NCR, ctrl); 2249 2250 /* Clear all status flags */ 2251 macb_writel(bp, TSR, -1); 2252 macb_writel(bp, RSR, -1); 2253 2254 /* Disable all interrupts */ 2255 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2256 queue_writel(queue, IDR, -1); 2257 queue_readl(queue, ISR); 2258 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2259 queue_writel(queue, ISR, -1); 2260 } 2261 } 2262 2263 static u32 gem_mdc_clk_div(struct macb *bp) 2264 { 2265 u32 config; 2266 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2267 2268 if (pclk_hz <= 20000000) 2269 config = GEM_BF(CLK, GEM_CLK_DIV8); 2270 else if (pclk_hz <= 40000000) 2271 config = GEM_BF(CLK, GEM_CLK_DIV16); 2272 else if (pclk_hz <= 80000000) 2273 config = GEM_BF(CLK, GEM_CLK_DIV32); 2274 else if (pclk_hz <= 120000000) 2275 config = GEM_BF(CLK, GEM_CLK_DIV48); 2276 else if (pclk_hz <= 160000000) 2277 config = GEM_BF(CLK, GEM_CLK_DIV64); 2278 else 2279 config = GEM_BF(CLK, GEM_CLK_DIV96); 2280 2281 return config; 2282 } 2283 2284 static u32 macb_mdc_clk_div(struct macb *bp) 2285 { 2286 u32 config; 2287 unsigned long pclk_hz; 2288 2289 if (macb_is_gem(bp)) 2290 return gem_mdc_clk_div(bp); 2291 2292 pclk_hz = clk_get_rate(bp->pclk); 2293 if (pclk_hz <= 20000000) 2294 config = MACB_BF(CLK, MACB_CLK_DIV8); 2295 else if (pclk_hz <= 40000000) 2296 config = MACB_BF(CLK, MACB_CLK_DIV16); 2297 else if (pclk_hz <= 80000000) 2298 config = MACB_BF(CLK, MACB_CLK_DIV32); 2299 else 2300 config = MACB_BF(CLK, MACB_CLK_DIV64); 2301 2302 return config; 2303 } 2304 2305 /* Get the DMA bus width field of the network configuration register that we 2306 * should program. We find the width from decoding the design configuration 2307 * register to find the maximum supported data bus width. 2308 */ 2309 static u32 macb_dbw(struct macb *bp) 2310 { 2311 if (!macb_is_gem(bp)) 2312 return 0; 2313 2314 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2315 case 4: 2316 return GEM_BF(DBW, GEM_DBW128); 2317 case 2: 2318 return GEM_BF(DBW, GEM_DBW64); 2319 case 1: 2320 default: 2321 return GEM_BF(DBW, GEM_DBW32); 2322 } 2323 } 2324 2325 /* Configure the receive DMA engine 2326 * - use the correct receive buffer size 2327 * - set best burst length for DMA operations 2328 * (if not supported by FIFO, it will fallback to default) 2329 * - set both rx/tx packet buffers to full memory size 2330 * These are configurable parameters for GEM. 
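 * Non-GEM MACB variants have no DMACFG register, so this function leaves
 * them untouched.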
2331 */ 2332 static void macb_configure_dma(struct macb *bp) 2333 { 2334 struct macb_queue *queue; 2335 u32 buffer_size; 2336 unsigned int q; 2337 u32 dmacfg; 2338 2339 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2340 if (macb_is_gem(bp)) { 2341 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2342 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2343 if (q) 2344 queue_writel(queue, RBQS, buffer_size); 2345 else 2346 dmacfg |= GEM_BF(RXBS, buffer_size); 2347 } 2348 if (bp->dma_burst_length) 2349 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2350 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2351 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2352 2353 if (bp->native_io) 2354 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2355 else 2356 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2357 2358 if (bp->dev->features & NETIF_F_HW_CSUM) 2359 dmacfg |= GEM_BIT(TXCOEN); 2360 else 2361 dmacfg &= ~GEM_BIT(TXCOEN); 2362 2363 dmacfg &= ~GEM_BIT(ADDR64); 2364 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2365 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2366 dmacfg |= GEM_BIT(ADDR64); 2367 #endif 2368 #ifdef CONFIG_MACB_USE_HWSTAMP 2369 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2370 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2371 #endif 2372 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2373 dmacfg); 2374 gem_writel(bp, DMACFG, dmacfg); 2375 } 2376 } 2377 2378 static void macb_init_hw(struct macb *bp) 2379 { 2380 u32 config; 2381 2382 macb_reset_hw(bp); 2383 macb_set_hwaddr(bp); 2384 2385 config = macb_mdc_clk_div(bp); 2386 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2387 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2388 if (bp->caps & MACB_CAPS_JUMBO) 2389 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2390 else 2391 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2392 if (bp->dev->flags & IFF_PROMISC) 2393 config |= MACB_BIT(CAF); /* Copy All Frames */ 2394 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2395 config |= GEM_BIT(RXCOEN); 2396 if (!(bp->dev->flags & IFF_BROADCAST)) 2397 config |= MACB_BIT(NBC); /* No BroadCast */ 2398 config |= macb_dbw(bp); 2399 macb_writel(bp, NCFGR, config); 2400 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2401 gem_writel(bp, JML, bp->jumbo_max_len); 2402 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2403 if (bp->caps & MACB_CAPS_JUMBO) 2404 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2405 2406 macb_configure_dma(bp); 2407 } 2408 2409 /* The hash address register is 64 bits long and takes up two 2410 * locations in the memory map. The least significant bits are stored 2411 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2412 * 2413 * The unicast hash enable and the multicast hash enable bits in the 2414 * network configuration register enable the reception of hash matched 2415 * frames. The destination address is reduced to a 6 bit index into 2416 * the 64 bit hash register using the following hash function. The 2417 * hash function is an exclusive or of every sixth bit of the 2418 * destination address. 
2419 * 2420 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2421 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2422 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2423 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2424 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2425 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2426 * 2427 * da[0] represents the least significant bit of the first byte 2428 * received, that is, the multicast/unicast indicator, and da[47] 2429 * represents the most significant bit of the last byte received. If 2430 * the hash index, hi[n], points to a bit that is set in the hash 2431 * register then the frame will be matched according to whether the 2432 * frame is multicast or unicast. A multicast match will be signalled 2433 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2434 * index points to a bit set in the hash register. A unicast match 2435 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2436 * and the hash index points to a bit set in the hash register. To 2437 * receive all multicast frames, the hash register should be set with 2438 * all ones and the multicast hash enable bit should be set in the 2439 * network configuration register. 2440 */ 2441 2442 static inline int hash_bit_value(int bitnr, __u8 *addr) 2443 { 2444 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2445 return 1; 2446 return 0; 2447 } 2448 2449 /* Return the hash index value for the specified address. */ 2450 static int hash_get_index(__u8 *addr) 2451 { 2452 int i, j, bitval; 2453 int hash_index = 0; 2454 2455 for (j = 0; j < 6; j++) { 2456 for (i = 0, bitval = 0; i < 8; i++) 2457 bitval ^= hash_bit_value(i * 6 + j, addr); 2458 2459 hash_index |= (bitval << j); 2460 } 2461 2462 return hash_index; 2463 } 2464 2465 /* Add multicast addresses to the internal multicast-hash table. */ 2466 static void macb_sethashtable(struct net_device *dev) 2467 { 2468 struct netdev_hw_addr *ha; 2469 unsigned long mc_filter[2]; 2470 unsigned int bitnr; 2471 struct macb *bp = netdev_priv(dev); 2472 2473 mc_filter[0] = 0; 2474 mc_filter[1] = 0; 2475 2476 netdev_for_each_mc_addr(ha, dev) { 2477 bitnr = hash_get_index(ha->addr); 2478 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2479 } 2480 2481 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2482 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2483 } 2484 2485 /* Enable/Disable promiscuous and multicast modes. 
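 * Promiscuous mode is controlled through the CAF bit in NCFGR; multicast
 * reception through the NCFGR_MTI bit together with the HRB/HRT hash registers.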
*/ 2486 static void macb_set_rx_mode(struct net_device *dev) 2487 { 2488 unsigned long cfg; 2489 struct macb *bp = netdev_priv(dev); 2490 2491 cfg = macb_readl(bp, NCFGR); 2492 2493 if (dev->flags & IFF_PROMISC) { 2494 /* Enable promiscuous mode */ 2495 cfg |= MACB_BIT(CAF); 2496 2497 /* Disable RX checksum offload */ 2498 if (macb_is_gem(bp)) 2499 cfg &= ~GEM_BIT(RXCOEN); 2500 } else { 2501 /* Disable promiscuous mode */ 2502 cfg &= ~MACB_BIT(CAF); 2503 2504 /* Enable RX checksum offload only if requested */ 2505 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2506 cfg |= GEM_BIT(RXCOEN); 2507 } 2508 2509 if (dev->flags & IFF_ALLMULTI) { 2510 /* Enable all multicast mode */ 2511 macb_or_gem_writel(bp, HRB, -1); 2512 macb_or_gem_writel(bp, HRT, -1); 2513 cfg |= MACB_BIT(NCFGR_MTI); 2514 } else if (!netdev_mc_empty(dev)) { 2515 /* Enable specific multicasts */ 2516 macb_sethashtable(dev); 2517 cfg |= MACB_BIT(NCFGR_MTI); 2518 } else if (dev->flags & (~IFF_ALLMULTI)) { 2519 /* Disable all multicast mode */ 2520 macb_or_gem_writel(bp, HRB, 0); 2521 macb_or_gem_writel(bp, HRT, 0); 2522 cfg &= ~MACB_BIT(NCFGR_MTI); 2523 } 2524 2525 macb_writel(bp, NCFGR, cfg); 2526 } 2527 2528 static int macb_open(struct net_device *dev) 2529 { 2530 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2531 struct macb *bp = netdev_priv(dev); 2532 struct macb_queue *queue; 2533 unsigned int q; 2534 int err; 2535 2536 netdev_dbg(bp->dev, "open\n"); 2537 2538 err = pm_runtime_get_sync(&bp->pdev->dev); 2539 if (err < 0) 2540 goto pm_exit; 2541 2542 /* RX buffers initialization */ 2543 macb_init_rx_buffer_size(bp, bufsz); 2544 2545 err = macb_alloc_consistent(bp); 2546 if (err) { 2547 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2548 err); 2549 goto pm_exit; 2550 } 2551 2552 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2553 napi_enable(&queue->napi); 2554 2555 macb_init_hw(bp); 2556 2557 err = macb_phylink_connect(bp); 2558 if (err) 2559 goto reset_hw; 2560 2561 netif_tx_start_all_queues(dev); 2562 2563 if (bp->ptp_info) 2564 bp->ptp_info->ptp_init(dev); 2565 2566 return 0; 2567 2568 reset_hw: 2569 macb_reset_hw(bp); 2570 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2571 napi_disable(&queue->napi); 2572 macb_free_consistent(bp); 2573 pm_exit: 2574 pm_runtime_put_sync(&bp->pdev->dev); 2575 return err; 2576 } 2577 2578 static int macb_close(struct net_device *dev) 2579 { 2580 struct macb *bp = netdev_priv(dev); 2581 struct macb_queue *queue; 2582 unsigned long flags; 2583 unsigned int q; 2584 2585 netif_tx_stop_all_queues(dev); 2586 2587 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2588 napi_disable(&queue->napi); 2589 2590 phylink_stop(bp->phylink); 2591 phylink_disconnect_phy(bp->phylink); 2592 2593 spin_lock_irqsave(&bp->lock, flags); 2594 macb_reset_hw(bp); 2595 netif_carrier_off(dev); 2596 spin_unlock_irqrestore(&bp->lock, flags); 2597 2598 macb_free_consistent(bp); 2599 2600 if (bp->ptp_info) 2601 bp->ptp_info->ptp_remove(dev); 2602 2603 pm_runtime_put(&bp->pdev->dev); 2604 2605 return 0; 2606 } 2607 2608 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2609 { 2610 if (netif_running(dev)) 2611 return -EBUSY; 2612 2613 dev->mtu = new_mtu; 2614 2615 return 0; 2616 } 2617 2618 static void gem_update_stats(struct macb *bp) 2619 { 2620 struct macb_queue *queue; 2621 unsigned int i, q, idx; 2622 unsigned long *stat; 2623 2624 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2625 2626 for (i = 0; i < 
GEM_STATS_LEN; ++i, ++p) { 2627 u32 offset = gem_statistics[i].offset; 2628 u64 val = bp->macb_reg_readl(bp, offset); 2629 2630 bp->ethtool_stats[i] += val; 2631 *p += val; 2632 2633 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2634 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2635 val = bp->macb_reg_readl(bp, offset + 4); 2636 bp->ethtool_stats[i] += ((u64)val) << 32; 2637 *(++p) += val; 2638 } 2639 } 2640 2641 idx = GEM_STATS_LEN; 2642 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2643 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2644 bp->ethtool_stats[idx++] = *stat; 2645 } 2646 2647 static struct net_device_stats *gem_get_stats(struct macb *bp) 2648 { 2649 struct gem_stats *hwstat = &bp->hw_stats.gem; 2650 struct net_device_stats *nstat = &bp->dev->stats; 2651 2652 gem_update_stats(bp); 2653 2654 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2655 hwstat->rx_alignment_errors + 2656 hwstat->rx_resource_errors + 2657 hwstat->rx_overruns + 2658 hwstat->rx_oversize_frames + 2659 hwstat->rx_jabbers + 2660 hwstat->rx_undersized_frames + 2661 hwstat->rx_length_field_frame_errors); 2662 nstat->tx_errors = (hwstat->tx_late_collisions + 2663 hwstat->tx_excessive_collisions + 2664 hwstat->tx_underrun + 2665 hwstat->tx_carrier_sense_errors); 2666 nstat->multicast = hwstat->rx_multicast_frames; 2667 nstat->collisions = (hwstat->tx_single_collision_frames + 2668 hwstat->tx_multiple_collision_frames + 2669 hwstat->tx_excessive_collisions); 2670 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2671 hwstat->rx_jabbers + 2672 hwstat->rx_undersized_frames + 2673 hwstat->rx_length_field_frame_errors); 2674 nstat->rx_over_errors = hwstat->rx_resource_errors; 2675 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2676 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2677 nstat->rx_fifo_errors = hwstat->rx_overruns; 2678 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2679 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2680 nstat->tx_fifo_errors = hwstat->tx_underrun; 2681 2682 return nstat; 2683 } 2684 2685 static void gem_get_ethtool_stats(struct net_device *dev, 2686 struct ethtool_stats *stats, u64 *data) 2687 { 2688 struct macb *bp; 2689 2690 bp = netdev_priv(dev); 2691 gem_update_stats(bp); 2692 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2693 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2694 } 2695 2696 static int gem_get_sset_count(struct net_device *dev, int sset) 2697 { 2698 struct macb *bp = netdev_priv(dev); 2699 2700 switch (sset) { 2701 case ETH_SS_STATS: 2702 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2703 default: 2704 return -EOPNOTSUPP; 2705 } 2706 } 2707 2708 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2709 { 2710 char stat_string[ETH_GSTRING_LEN]; 2711 struct macb *bp = netdev_priv(dev); 2712 struct macb_queue *queue; 2713 unsigned int i; 2714 unsigned int q; 2715 2716 switch (sset) { 2717 case ETH_SS_STATS: 2718 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2719 memcpy(p, gem_statistics[i].stat_string, 2720 ETH_GSTRING_LEN); 2721 2722 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2723 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2724 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2725 q, queue_statistics[i].stat_string); 2726 memcpy(p, stat_string, ETH_GSTRING_LEN); 2727 } 2728 } 2729 break; 2730 } 2731 } 2732 2733 static struct net_device_stats *macb_get_stats(struct 
net_device *dev) 2734 { 2735 struct macb *bp = netdev_priv(dev); 2736 struct net_device_stats *nstat = &bp->dev->stats; 2737 struct macb_stats *hwstat = &bp->hw_stats.macb; 2738 2739 if (macb_is_gem(bp)) 2740 return gem_get_stats(bp); 2741 2742 /* read stats from hardware */ 2743 macb_update_stats(bp); 2744 2745 /* Convert HW stats into netdevice stats */ 2746 nstat->rx_errors = (hwstat->rx_fcs_errors + 2747 hwstat->rx_align_errors + 2748 hwstat->rx_resource_errors + 2749 hwstat->rx_overruns + 2750 hwstat->rx_oversize_pkts + 2751 hwstat->rx_jabbers + 2752 hwstat->rx_undersize_pkts + 2753 hwstat->rx_length_mismatch); 2754 nstat->tx_errors = (hwstat->tx_late_cols + 2755 hwstat->tx_excessive_cols + 2756 hwstat->tx_underruns + 2757 hwstat->tx_carrier_errors + 2758 hwstat->sqe_test_errors); 2759 nstat->collisions = (hwstat->tx_single_cols + 2760 hwstat->tx_multiple_cols + 2761 hwstat->tx_excessive_cols); 2762 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2763 hwstat->rx_jabbers + 2764 hwstat->rx_undersize_pkts + 2765 hwstat->rx_length_mismatch); 2766 nstat->rx_over_errors = hwstat->rx_resource_errors + 2767 hwstat->rx_overruns; 2768 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2769 nstat->rx_frame_errors = hwstat->rx_align_errors; 2770 nstat->rx_fifo_errors = hwstat->rx_overruns; 2771 /* XXX: What does "missed" mean? */ 2772 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2773 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2774 nstat->tx_fifo_errors = hwstat->tx_underruns; 2775 /* Don't know about heartbeat or window errors... */ 2776 2777 return nstat; 2778 } 2779 2780 static int macb_get_regs_len(struct net_device *netdev) 2781 { 2782 return MACB_GREGS_NBR * sizeof(u32); 2783 } 2784 2785 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2786 void *p) 2787 { 2788 struct macb *bp = netdev_priv(dev); 2789 unsigned int tail, head; 2790 u32 *regs_buff = p; 2791 2792 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2793 | MACB_GREGS_VERSION; 2794 2795 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2796 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2797 2798 regs_buff[0] = macb_readl(bp, NCR); 2799 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2800 regs_buff[2] = macb_readl(bp, NSR); 2801 regs_buff[3] = macb_readl(bp, TSR); 2802 regs_buff[4] = macb_readl(bp, RBQP); 2803 regs_buff[5] = macb_readl(bp, TBQP); 2804 regs_buff[6] = macb_readl(bp, RSR); 2805 regs_buff[7] = macb_readl(bp, IMR); 2806 2807 regs_buff[8] = tail; 2808 regs_buff[9] = head; 2809 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2810 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2811 2812 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2813 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2814 if (macb_is_gem(bp)) 2815 regs_buff[13] = gem_readl(bp, DMACFG); 2816 } 2817 2818 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2819 { 2820 struct macb *bp = netdev_priv(netdev); 2821 2822 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2823 phylink_ethtool_get_wol(bp->phylink, wol); 2824 wol->supported |= WAKE_MAGIC; 2825 2826 if (bp->wol & MACB_WOL_ENABLED) 2827 wol->wolopts |= WAKE_MAGIC; 2828 } 2829 } 2830 2831 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2832 { 2833 struct macb *bp = netdev_priv(netdev); 2834 int ret; 2835 2836 /* Pass the order to phylink layer */ 2837 ret = phylink_ethtool_set_wol(bp->phylink, wol); 2838 /* Don't manage WoL on MAC if handled by the PHY 2839 * or if there's a 
failure in talking to the PHY 2840 */ 2841 if (!ret || ret != -EOPNOTSUPP) 2842 return ret; 2843 2844 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 2845 (wol->wolopts & ~WAKE_MAGIC)) 2846 return -EOPNOTSUPP; 2847 2848 if (wol->wolopts & WAKE_MAGIC) 2849 bp->wol |= MACB_WOL_ENABLED; 2850 else 2851 bp->wol &= ~MACB_WOL_ENABLED; 2852 2853 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 2854 2855 return 0; 2856 } 2857 2858 static int macb_get_link_ksettings(struct net_device *netdev, 2859 struct ethtool_link_ksettings *kset) 2860 { 2861 struct macb *bp = netdev_priv(netdev); 2862 2863 return phylink_ethtool_ksettings_get(bp->phylink, kset); 2864 } 2865 2866 static int macb_set_link_ksettings(struct net_device *netdev, 2867 const struct ethtool_link_ksettings *kset) 2868 { 2869 struct macb *bp = netdev_priv(netdev); 2870 2871 return phylink_ethtool_ksettings_set(bp->phylink, kset); 2872 } 2873 2874 static void macb_get_ringparam(struct net_device *netdev, 2875 struct ethtool_ringparam *ring) 2876 { 2877 struct macb *bp = netdev_priv(netdev); 2878 2879 ring->rx_max_pending = MAX_RX_RING_SIZE; 2880 ring->tx_max_pending = MAX_TX_RING_SIZE; 2881 2882 ring->rx_pending = bp->rx_ring_size; 2883 ring->tx_pending = bp->tx_ring_size; 2884 } 2885 2886 static int macb_set_ringparam(struct net_device *netdev, 2887 struct ethtool_ringparam *ring) 2888 { 2889 struct macb *bp = netdev_priv(netdev); 2890 u32 new_rx_size, new_tx_size; 2891 unsigned int reset = 0; 2892 2893 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2894 return -EINVAL; 2895 2896 new_rx_size = clamp_t(u32, ring->rx_pending, 2897 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 2898 new_rx_size = roundup_pow_of_two(new_rx_size); 2899 2900 new_tx_size = clamp_t(u32, ring->tx_pending, 2901 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 2902 new_tx_size = roundup_pow_of_two(new_tx_size); 2903 2904 if ((new_tx_size == bp->tx_ring_size) && 2905 (new_rx_size == bp->rx_ring_size)) { 2906 /* nothing to do */ 2907 return 0; 2908 } 2909 2910 if (netif_running(bp->dev)) { 2911 reset = 1; 2912 macb_close(bp->dev); 2913 } 2914 2915 bp->rx_ring_size = new_rx_size; 2916 bp->tx_ring_size = new_tx_size; 2917 2918 if (reset) 2919 macb_open(bp->dev); 2920 2921 return 0; 2922 } 2923 2924 #ifdef CONFIG_MACB_USE_HWSTAMP 2925 static unsigned int gem_get_tsu_rate(struct macb *bp) 2926 { 2927 struct clk *tsu_clk; 2928 unsigned int tsu_rate; 2929 2930 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 2931 if (!IS_ERR(tsu_clk)) 2932 tsu_rate = clk_get_rate(tsu_clk); 2933 /* try pclk instead */ 2934 else if (!IS_ERR(bp->pclk)) { 2935 tsu_clk = bp->pclk; 2936 tsu_rate = clk_get_rate(tsu_clk); 2937 } else 2938 return -ENOTSUPP; 2939 return tsu_rate; 2940 } 2941 2942 static s32 gem_get_ptp_max_adj(void) 2943 { 2944 return 64000000; 2945 } 2946 2947 static int gem_get_ts_info(struct net_device *dev, 2948 struct ethtool_ts_info *info) 2949 { 2950 struct macb *bp = netdev_priv(dev); 2951 2952 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 2953 ethtool_op_get_ts_info(dev, info); 2954 return 0; 2955 } 2956 2957 info->so_timestamping = 2958 SOF_TIMESTAMPING_TX_SOFTWARE | 2959 SOF_TIMESTAMPING_RX_SOFTWARE | 2960 SOF_TIMESTAMPING_SOFTWARE | 2961 SOF_TIMESTAMPING_TX_HARDWARE | 2962 SOF_TIMESTAMPING_RX_HARDWARE | 2963 SOF_TIMESTAMPING_RAW_HARDWARE; 2964 info->tx_types = 2965 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 2966 (1 << HWTSTAMP_TX_OFF) | 2967 (1 << HWTSTAMP_TX_ON); 2968 info->rx_filters = 2969 (1 << HWTSTAMP_FILTER_NONE) | 2970 (1 << HWTSTAMP_FILTER_ALL); 2971 2972 
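/* report the index of the registered PTP clock, or -1 if there is none */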
info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; 2973 2974 return 0; 2975 } 2976 2977 static struct macb_ptp_info gem_ptp_info = { 2978 .ptp_init = gem_ptp_init, 2979 .ptp_remove = gem_ptp_remove, 2980 .get_ptp_max_adj = gem_get_ptp_max_adj, 2981 .get_tsu_rate = gem_get_tsu_rate, 2982 .get_ts_info = gem_get_ts_info, 2983 .get_hwtst = gem_get_hwtst, 2984 .set_hwtst = gem_set_hwtst, 2985 }; 2986 #endif 2987 2988 static int macb_get_ts_info(struct net_device *netdev, 2989 struct ethtool_ts_info *info) 2990 { 2991 struct macb *bp = netdev_priv(netdev); 2992 2993 if (bp->ptp_info) 2994 return bp->ptp_info->get_ts_info(netdev, info); 2995 2996 return ethtool_op_get_ts_info(netdev, info); 2997 } 2998 2999 static void gem_enable_flow_filters(struct macb *bp, bool enable) 3000 { 3001 struct net_device *netdev = bp->dev; 3002 struct ethtool_rx_fs_item *item; 3003 u32 t2_scr; 3004 int num_t2_scr; 3005 3006 if (!(netdev->features & NETIF_F_NTUPLE)) 3007 return; 3008 3009 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 3010 3011 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3012 struct ethtool_rx_flow_spec *fs = &item->fs; 3013 struct ethtool_tcpip4_spec *tp4sp_m; 3014 3015 if (fs->location >= num_t2_scr) 3016 continue; 3017 3018 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 3019 3020 /* enable/disable screener regs for the flow entry */ 3021 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 3022 3023 /* only enable fields with no masking */ 3024 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3025 3026 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 3027 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 3028 else 3029 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 3030 3031 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 3032 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 3033 else 3034 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 3035 3036 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 3037 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 3038 else 3039 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 3040 3041 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 3042 } 3043 } 3044 3045 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 3046 { 3047 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 3048 uint16_t index = fs->location; 3049 u32 w0, w1, t2_scr; 3050 bool cmp_a = false; 3051 bool cmp_b = false; 3052 bool cmp_c = false; 3053 3054 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 3055 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3056 3057 /* ignore field if any masking set */ 3058 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 3059 /* 1st compare reg - IP source address */ 3060 w0 = 0; 3061 w1 = 0; 3062 w0 = tp4sp_v->ip4src; 3063 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3064 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3065 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 3066 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 3067 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 3068 cmp_a = true; 3069 } 3070 3071 /* ignore field if any masking set */ 3072 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 3073 /* 2nd compare reg - IP destination address */ 3074 w0 = 0; 3075 w1 = 0; 3076 w0 = tp4sp_v->ip4dst; 3077 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3078 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3079 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 3080 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 3081 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 3082 cmp_b = true; 3083 } 3084 3085 /* ignore both port fields 
if masking set in both */ 3086 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 3087 /* 3rd compare reg - source port, destination port */ 3088 w0 = 0; 3089 w1 = 0; 3090 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 3091 if (tp4sp_m->psrc == tp4sp_m->pdst) { 3092 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 3093 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3094 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3095 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3096 } else { 3097 /* only one port definition */ 3098 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 3099 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 3100 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 3101 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 3102 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3103 } else { /* dst port */ 3104 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3105 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 3106 } 3107 } 3108 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 3109 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 3110 cmp_c = true; 3111 } 3112 3113 t2_scr = 0; 3114 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 3115 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 3116 if (cmp_a) 3117 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 3118 if (cmp_b) 3119 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 3120 if (cmp_c) 3121 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 3122 gem_writel_n(bp, SCRT2, index, t2_scr); 3123 } 3124 3125 static int gem_add_flow_filter(struct net_device *netdev, 3126 struct ethtool_rxnfc *cmd) 3127 { 3128 struct macb *bp = netdev_priv(netdev); 3129 struct ethtool_rx_flow_spec *fs = &cmd->fs; 3130 struct ethtool_rx_fs_item *item, *newfs; 3131 unsigned long flags; 3132 int ret = -EINVAL; 3133 bool added = false; 3134 3135 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 3136 if (newfs == NULL) 3137 return -ENOMEM; 3138 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 3139 3140 netdev_dbg(netdev, 3141 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3142 fs->flow_type, (int)fs->ring_cookie, fs->location, 3143 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3144 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3145 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 3146 3147 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3148 3149 /* find correct place to add in list */ 3150 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3151 if (item->fs.location > newfs->fs.location) { 3152 list_add_tail(&newfs->list, &item->list); 3153 added = true; 3154 break; 3155 } else if (item->fs.location == fs->location) { 3156 netdev_err(netdev, "Rule not added: location %d not free!\n", 3157 fs->location); 3158 ret = -EBUSY; 3159 goto err; 3160 } 3161 } 3162 if (!added) 3163 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 3164 3165 gem_prog_cmp_regs(bp, fs); 3166 bp->rx_fs_list.count++; 3167 /* enable filtering if NTUPLE on */ 3168 gem_enable_flow_filters(bp, 1); 3169 3170 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3171 return 0; 3172 3173 err: 3174 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3175 kfree(newfs); 3176 return ret; 3177 } 3178 3179 static int gem_del_flow_filter(struct net_device *netdev, 3180 struct ethtool_rxnfc *cmd) 3181 { 3182 struct macb *bp = netdev_priv(netdev); 3183 struct ethtool_rx_fs_item *item; 3184 struct ethtool_rx_flow_spec *fs; 3185 unsigned long flags; 3186 3187 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3188 3189 list_for_each_entry(item, 
&bp->rx_fs_list.list, list) { 3190 if (item->fs.location == cmd->fs.location) { 3191 /* disable screener regs for the flow entry */ 3192 fs = &(item->fs); 3193 netdev_dbg(netdev, 3194 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3195 fs->flow_type, (int)fs->ring_cookie, fs->location, 3196 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3197 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3198 htons(fs->h_u.tcp_ip4_spec.psrc), 3199 htons(fs->h_u.tcp_ip4_spec.pdst)); 3200 3201 gem_writel_n(bp, SCRT2, fs->location, 0); 3202 3203 list_del(&item->list); 3204 bp->rx_fs_list.count--; 3205 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3206 kfree(item); 3207 return 0; 3208 } 3209 } 3210 3211 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3212 return -EINVAL; 3213 } 3214 3215 static int gem_get_flow_entry(struct net_device *netdev, 3216 struct ethtool_rxnfc *cmd) 3217 { 3218 struct macb *bp = netdev_priv(netdev); 3219 struct ethtool_rx_fs_item *item; 3220 3221 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3222 if (item->fs.location == cmd->fs.location) { 3223 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3224 return 0; 3225 } 3226 } 3227 return -EINVAL; 3228 } 3229 3230 static int gem_get_all_flow_entries(struct net_device *netdev, 3231 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3232 { 3233 struct macb *bp = netdev_priv(netdev); 3234 struct ethtool_rx_fs_item *item; 3235 uint32_t cnt = 0; 3236 3237 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3238 if (cnt == cmd->rule_cnt) 3239 return -EMSGSIZE; 3240 rule_locs[cnt] = item->fs.location; 3241 cnt++; 3242 } 3243 cmd->data = bp->max_tuples; 3244 cmd->rule_cnt = cnt; 3245 3246 return 0; 3247 } 3248 3249 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3250 u32 *rule_locs) 3251 { 3252 struct macb *bp = netdev_priv(netdev); 3253 int ret = 0; 3254 3255 switch (cmd->cmd) { 3256 case ETHTOOL_GRXRINGS: 3257 cmd->data = bp->num_queues; 3258 break; 3259 case ETHTOOL_GRXCLSRLCNT: 3260 cmd->rule_cnt = bp->rx_fs_list.count; 3261 break; 3262 case ETHTOOL_GRXCLSRULE: 3263 ret = gem_get_flow_entry(netdev, cmd); 3264 break; 3265 case ETHTOOL_GRXCLSRLALL: 3266 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3267 break; 3268 default: 3269 netdev_err(netdev, 3270 "Command parameter %d is not supported\n", cmd->cmd); 3271 ret = -EOPNOTSUPP; 3272 } 3273 3274 return ret; 3275 } 3276 3277 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3278 { 3279 struct macb *bp = netdev_priv(netdev); 3280 int ret; 3281 3282 switch (cmd->cmd) { 3283 case ETHTOOL_SRXCLSRLINS: 3284 if ((cmd->fs.location >= bp->max_tuples) 3285 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3286 ret = -EINVAL; 3287 break; 3288 } 3289 ret = gem_add_flow_filter(netdev, cmd); 3290 break; 3291 case ETHTOOL_SRXCLSRLDEL: 3292 ret = gem_del_flow_filter(netdev, cmd); 3293 break; 3294 default: 3295 netdev_err(netdev, 3296 "Command parameter %d is not supported\n", cmd->cmd); 3297 ret = -EOPNOTSUPP; 3298 } 3299 3300 return ret; 3301 } 3302 3303 static const struct ethtool_ops macb_ethtool_ops = { 3304 .get_regs_len = macb_get_regs_len, 3305 .get_regs = macb_get_regs, 3306 .get_link = ethtool_op_get_link, 3307 .get_ts_info = ethtool_op_get_ts_info, 3308 .get_wol = macb_get_wol, 3309 .set_wol = macb_set_wol, 3310 .get_link_ksettings = macb_get_link_ksettings, 3311 .set_link_ksettings = macb_set_link_ksettings, 3312 .get_ringparam = macb_get_ringparam, 3313 .set_ringparam = macb_set_ringparam, 3314 }; 3315 3316 
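/* The GEM variant below additionally exposes hardware statistics, per-queue
 * statistic strings and RX flow classification (rxnfc) on top of the common
 * MACB ethtool operations above.
 */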
static const struct ethtool_ops gem_ethtool_ops = { 3317 .get_regs_len = macb_get_regs_len, 3318 .get_regs = macb_get_regs, 3319 .get_link = ethtool_op_get_link, 3320 .get_ts_info = macb_get_ts_info, 3321 .get_ethtool_stats = gem_get_ethtool_stats, 3322 .get_strings = gem_get_ethtool_strings, 3323 .get_sset_count = gem_get_sset_count, 3324 .get_link_ksettings = macb_get_link_ksettings, 3325 .set_link_ksettings = macb_set_link_ksettings, 3326 .get_ringparam = macb_get_ringparam, 3327 .set_ringparam = macb_set_ringparam, 3328 .get_rxnfc = gem_get_rxnfc, 3329 .set_rxnfc = gem_set_rxnfc, 3330 }; 3331 3332 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3333 { 3334 struct macb *bp = netdev_priv(dev); 3335 3336 if (!netif_running(dev)) 3337 return -EINVAL; 3338 3339 if (bp->ptp_info) { 3340 switch (cmd) { 3341 case SIOCSHWTSTAMP: 3342 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3343 case SIOCGHWTSTAMP: 3344 return bp->ptp_info->get_hwtst(dev, rq); 3345 } 3346 } 3347 3348 return phylink_mii_ioctl(bp->phylink, rq, cmd); 3349 } 3350 3351 static inline void macb_set_txcsum_feature(struct macb *bp, 3352 netdev_features_t features) 3353 { 3354 u32 val; 3355 3356 if (!macb_is_gem(bp)) 3357 return; 3358 3359 val = gem_readl(bp, DMACFG); 3360 if (features & NETIF_F_HW_CSUM) 3361 val |= GEM_BIT(TXCOEN); 3362 else 3363 val &= ~GEM_BIT(TXCOEN); 3364 3365 gem_writel(bp, DMACFG, val); 3366 } 3367 3368 static inline void macb_set_rxcsum_feature(struct macb *bp, 3369 netdev_features_t features) 3370 { 3371 struct net_device *netdev = bp->dev; 3372 u32 val; 3373 3374 if (!macb_is_gem(bp)) 3375 return; 3376 3377 val = gem_readl(bp, NCFGR); 3378 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) 3379 val |= GEM_BIT(RXCOEN); 3380 else 3381 val &= ~GEM_BIT(RXCOEN); 3382 3383 gem_writel(bp, NCFGR, val); 3384 } 3385 3386 static inline void macb_set_rxflow_feature(struct macb *bp, 3387 netdev_features_t features) 3388 { 3389 if (!macb_is_gem(bp)) 3390 return; 3391 3392 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); 3393 } 3394 3395 static int macb_set_features(struct net_device *netdev, 3396 netdev_features_t features) 3397 { 3398 struct macb *bp = netdev_priv(netdev); 3399 netdev_features_t changed = features ^ netdev->features; 3400 3401 /* TX checksum offload */ 3402 if (changed & NETIF_F_HW_CSUM) 3403 macb_set_txcsum_feature(bp, features); 3404 3405 /* RX checksum offload */ 3406 if (changed & NETIF_F_RXCSUM) 3407 macb_set_rxcsum_feature(bp, features); 3408 3409 /* RX Flow Filters */ 3410 if (changed & NETIF_F_NTUPLE) 3411 macb_set_rxflow_feature(bp, features); 3412 3413 return 0; 3414 } 3415 3416 static void macb_restore_features(struct macb *bp) 3417 { 3418 struct net_device *netdev = bp->dev; 3419 netdev_features_t features = netdev->features; 3420 3421 /* TX checksum offload */ 3422 macb_set_txcsum_feature(bp, features); 3423 3424 /* RX checksum offload */ 3425 macb_set_rxcsum_feature(bp, features); 3426 3427 /* RX Flow Filters */ 3428 macb_set_rxflow_feature(bp, features); 3429 } 3430 3431 static const struct net_device_ops macb_netdev_ops = { 3432 .ndo_open = macb_open, 3433 .ndo_stop = macb_close, 3434 .ndo_start_xmit = macb_start_xmit, 3435 .ndo_set_rx_mode = macb_set_rx_mode, 3436 .ndo_get_stats = macb_get_stats, 3437 .ndo_do_ioctl = macb_ioctl, 3438 .ndo_validate_addr = eth_validate_addr, 3439 .ndo_change_mtu = macb_change_mtu, 3440 .ndo_set_mac_address = eth_mac_addr, 3441 #ifdef CONFIG_NET_POLL_CONTROLLER 3442 .ndo_poll_controller = 
macb_poll_controller, 3443 #endif 3444 .ndo_set_features = macb_set_features, 3445 .ndo_features_check = macb_features_check, 3446 }; 3447 3448 /* Configure peripheral capabilities according to device tree 3449 * and integration options used 3450 */ 3451 static void macb_configure_caps(struct macb *bp, 3452 const struct macb_config *dt_conf) 3453 { 3454 u32 dcfg; 3455 3456 if (dt_conf) 3457 bp->caps = dt_conf->caps; 3458 3459 if (hw_is_gem(bp->regs, bp->native_io)) { 3460 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3461 3462 dcfg = gem_readl(bp, DCFG1); 3463 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3464 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3465 dcfg = gem_readl(bp, DCFG2); 3466 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3467 bp->caps |= MACB_CAPS_FIFO_MODE; 3468 #ifdef CONFIG_MACB_USE_HWSTAMP 3469 if (gem_has_ptp(bp)) { 3470 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3471 dev_err(&bp->pdev->dev, 3472 "GEM doesn't support hardware ptp.\n"); 3473 else { 3474 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3475 bp->ptp_info = &gem_ptp_info; 3476 } 3477 } 3478 #endif 3479 } 3480 3481 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3482 } 3483 3484 static void macb_probe_queues(void __iomem *mem, 3485 bool native_io, 3486 unsigned int *queue_mask, 3487 unsigned int *num_queues) 3488 { 3489 *queue_mask = 0x1; 3490 *num_queues = 1; 3491 3492 /* is it macb or gem ? 3493 * 3494 * We need to read directly from the hardware here because 3495 * we are early in the probe process and don't have the 3496 * MACB_CAPS_MACB_IS_GEM flag positioned 3497 */ 3498 if (!hw_is_gem(mem, native_io)) 3499 return; 3500 3501 /* bit 0 is never set but queue 0 always exists */ 3502 *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; 3503 *num_queues = hweight32(*queue_mask); 3504 } 3505 3506 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3507 struct clk **hclk, struct clk **tx_clk, 3508 struct clk **rx_clk, struct clk **tsu_clk) 3509 { 3510 struct macb_platform_data *pdata; 3511 int err; 3512 3513 pdata = dev_get_platdata(&pdev->dev); 3514 if (pdata) { 3515 *pclk = pdata->pclk; 3516 *hclk = pdata->hclk; 3517 } else { 3518 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3519 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3520 } 3521 3522 if (IS_ERR_OR_NULL(*pclk)) { 3523 err = PTR_ERR(*pclk); 3524 if (!err) 3525 err = -ENODEV; 3526 3527 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3528 return err; 3529 } 3530 3531 if (IS_ERR_OR_NULL(*hclk)) { 3532 err = PTR_ERR(*hclk); 3533 if (!err) 3534 err = -ENODEV; 3535 3536 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3537 return err; 3538 } 3539 3540 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); 3541 if (IS_ERR(*tx_clk)) 3542 return PTR_ERR(*tx_clk); 3543 3544 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); 3545 if (IS_ERR(*rx_clk)) 3546 return PTR_ERR(*rx_clk); 3547 3548 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); 3549 if (IS_ERR(*tsu_clk)) 3550 return PTR_ERR(*tsu_clk); 3551 3552 err = clk_prepare_enable(*pclk); 3553 if (err) { 3554 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3555 return err; 3556 } 3557 3558 err = clk_prepare_enable(*hclk); 3559 if (err) { 3560 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3561 goto err_disable_pclk; 3562 } 3563 3564 err = clk_prepare_enable(*tx_clk); 3565 if (err) { 3566 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3567 goto err_disable_hclk; 3568 } 3569 3570 err = clk_prepare_enable(*rx_clk); 3571 if (err) { 3572 
dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3573 goto err_disable_txclk; 3574 } 3575 3576 err = clk_prepare_enable(*tsu_clk); 3577 if (err) { 3578 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3579 goto err_disable_rxclk; 3580 } 3581 3582 return 0; 3583 3584 err_disable_rxclk: 3585 clk_disable_unprepare(*rx_clk); 3586 3587 err_disable_txclk: 3588 clk_disable_unprepare(*tx_clk); 3589 3590 err_disable_hclk: 3591 clk_disable_unprepare(*hclk); 3592 3593 err_disable_pclk: 3594 clk_disable_unprepare(*pclk); 3595 3596 return err; 3597 } 3598 3599 static int macb_init(struct platform_device *pdev) 3600 { 3601 struct net_device *dev = platform_get_drvdata(pdev); 3602 unsigned int hw_q, q; 3603 struct macb *bp = netdev_priv(dev); 3604 struct macb_queue *queue; 3605 int err; 3606 u32 val, reg; 3607 3608 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3609 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3610 3611 /* set the queue register mapping once for all: queue0 has a special 3612 * register mapping but we don't want to test the queue index then 3613 * compute the corresponding register offset at run time. 3614 */ 3615 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3616 if (!(bp->queue_mask & (1 << hw_q))) 3617 continue; 3618 3619 queue = &bp->queues[q]; 3620 queue->bp = bp; 3621 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); 3622 if (hw_q) { 3623 queue->ISR = GEM_ISR(hw_q - 1); 3624 queue->IER = GEM_IER(hw_q - 1); 3625 queue->IDR = GEM_IDR(hw_q - 1); 3626 queue->IMR = GEM_IMR(hw_q - 1); 3627 queue->TBQP = GEM_TBQP(hw_q - 1); 3628 queue->RBQP = GEM_RBQP(hw_q - 1); 3629 queue->RBQS = GEM_RBQS(hw_q - 1); 3630 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3631 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3632 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3633 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3634 } 3635 #endif 3636 } else { 3637 /* queue0 uses legacy registers */ 3638 queue->ISR = MACB_ISR; 3639 queue->IER = MACB_IER; 3640 queue->IDR = MACB_IDR; 3641 queue->IMR = MACB_IMR; 3642 queue->TBQP = MACB_TBQP; 3643 queue->RBQP = MACB_RBQP; 3644 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3645 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3646 queue->TBQPH = MACB_TBQPH; 3647 queue->RBQPH = MACB_RBQPH; 3648 } 3649 #endif 3650 } 3651 3652 /* get irq: here we use the linux queue index, not the hardware 3653 * queue index. the queue irq definitions in the device tree 3654 * must remove the optional gaps that could exist in the 3655 * hardware queue mask. 
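 * e.g. a hardware queue mask of 0x0b (queues 0, 1 and 3) maps to Linux
 * queue indices 0, 1 and 2, so three interrupts must be listed, in that order.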
3656 */ 3657 queue->irq = platform_get_irq(pdev, q); 3658 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3659 IRQF_SHARED, dev->name, queue); 3660 if (err) { 3661 dev_err(&pdev->dev, 3662 "Unable to request IRQ %d (error %d)\n", 3663 queue->irq, err); 3664 return err; 3665 } 3666 3667 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3668 q++; 3669 } 3670 3671 dev->netdev_ops = &macb_netdev_ops; 3672 3673 /* set up the appropriate routines according to adapter type */ 3674 if (macb_is_gem(bp)) { 3675 bp->max_tx_length = GEM_MAX_TX_LEN; 3676 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3677 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3678 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3679 bp->macbgem_ops.mog_rx = gem_rx; 3680 dev->ethtool_ops = &gem_ethtool_ops; 3681 } else { 3682 bp->max_tx_length = MACB_MAX_TX_LEN; 3683 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3684 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3685 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3686 bp->macbgem_ops.mog_rx = macb_rx; 3687 dev->ethtool_ops = &macb_ethtool_ops; 3688 } 3689 3690 /* Set features */ 3691 dev->hw_features = NETIF_F_SG; 3692 3693 /* Check LSO capability */ 3694 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3695 dev->hw_features |= MACB_NETIF_LSO; 3696 3697 /* Checksum offload is only available on gem with packet buffer */ 3698 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3699 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3700 if (bp->caps & MACB_CAPS_SG_DISABLED) 3701 dev->hw_features &= ~NETIF_F_SG; 3702 dev->features = dev->hw_features; 3703 3704 /* Check RX Flow Filters support. 3705 * Max Rx flows set by availability of screeners & compare regs: 3706 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3707 */ 3708 reg = gem_readl(bp, DCFG8); 3709 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3710 GEM_BFEXT(T2SCR, reg)); 3711 if (bp->max_tuples > 0) { 3712 /* also needs one ethtype match to check IPv4 */ 3713 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3714 /* program this reg now */ 3715 reg = 0; 3716 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3717 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3718 /* Filtering is supported in hw but don't enable it in kernel now */ 3719 dev->hw_features |= NETIF_F_NTUPLE; 3720 /* init Rx flow definitions */ 3721 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3722 bp->rx_fs_list.count = 0; 3723 spin_lock_init(&bp->rx_fs_lock); 3724 } else 3725 bp->max_tuples = 0; 3726 } 3727 3728 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3729 val = 0; 3730 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) 3731 val = GEM_BIT(RGMII); 3732 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3733 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3734 val = MACB_BIT(RMII); 3735 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3736 val = MACB_BIT(MII); 3737 3738 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3739 val |= MACB_BIT(CLKEN); 3740 3741 macb_or_gem_writel(bp, USRIO, val); 3742 } 3743 3744 /* Set MII management clock divider */ 3745 val = macb_mdc_clk_div(bp); 3746 val |= macb_dbw(bp); 3747 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3748 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3749 macb_writel(bp, NCFGR, val); 3750 3751 return 0; 3752 } 3753 3754 #if defined(CONFIG_OF) 3755 /* 1518 rounded up */ 3756 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3757 /* max number of receive buffers */ 3758 #define AT91ETHER_MAX_RX_DESCR 9 3759 3760 static
struct sifive_fu540_macb_mgmt *mgmt; 3761 3762 static int at91ether_alloc_coherent(struct macb *lp) 3763 { 3764 struct macb_queue *q = &lp->queues[0]; 3765 3766 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3767 (AT91ETHER_MAX_RX_DESCR * 3768 macb_dma_desc_get_size(lp)), 3769 &q->rx_ring_dma, GFP_KERNEL); 3770 if (!q->rx_ring) 3771 return -ENOMEM; 3772 3773 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3774 AT91ETHER_MAX_RX_DESCR * 3775 AT91ETHER_MAX_RBUFF_SZ, 3776 &q->rx_buffers_dma, GFP_KERNEL); 3777 if (!q->rx_buffers) { 3778 dma_free_coherent(&lp->pdev->dev, 3779 AT91ETHER_MAX_RX_DESCR * 3780 macb_dma_desc_get_size(lp), 3781 q->rx_ring, q->rx_ring_dma); 3782 q->rx_ring = NULL; 3783 return -ENOMEM; 3784 } 3785 3786 return 0; 3787 } 3788 3789 static void at91ether_free_coherent(struct macb *lp) 3790 { 3791 struct macb_queue *q = &lp->queues[0]; 3792 3793 if (q->rx_ring) { 3794 dma_free_coherent(&lp->pdev->dev, 3795 AT91ETHER_MAX_RX_DESCR * 3796 macb_dma_desc_get_size(lp), 3797 q->rx_ring, q->rx_ring_dma); 3798 q->rx_ring = NULL; 3799 } 3800 3801 if (q->rx_buffers) { 3802 dma_free_coherent(&lp->pdev->dev, 3803 AT91ETHER_MAX_RX_DESCR * 3804 AT91ETHER_MAX_RBUFF_SZ, 3805 q->rx_buffers, q->rx_buffers_dma); 3806 q->rx_buffers = NULL; 3807 } 3808 } 3809 3810 /* Initialize and start the Receiver and Transmit subsystems */ 3811 static int at91ether_start(struct macb *lp) 3812 { 3813 struct macb_queue *q = &lp->queues[0]; 3814 struct macb_dma_desc *desc; 3815 dma_addr_t addr; 3816 u32 ctl; 3817 int i, ret; 3818 3819 ret = at91ether_alloc_coherent(lp); 3820 if (ret) 3821 return ret; 3822 3823 addr = q->rx_buffers_dma; 3824 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 3825 desc = macb_rx_desc(q, i); 3826 macb_set_addr(lp, desc, addr); 3827 desc->ctrl = 0; 3828 addr += AT91ETHER_MAX_RBUFF_SZ; 3829 } 3830 3831 /* Set the Wrap bit on the last descriptor */ 3832 desc->addr |= MACB_BIT(RX_WRAP); 3833 3834 /* Reset buffer index */ 3835 q->rx_tail = 0; 3836 3837 /* Program address of descriptor list in Rx Buffer Queue register */ 3838 macb_writel(lp, RBQP, q->rx_ring_dma); 3839 3840 /* Enable Receive and Transmit */ 3841 ctl = macb_readl(lp, NCR); 3842 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 3843 3844 /* Enable MAC interrupts */ 3845 macb_writel(lp, IER, MACB_BIT(RCOMP) | 3846 MACB_BIT(RXUBR) | 3847 MACB_BIT(ISR_TUND) | 3848 MACB_BIT(ISR_RLE) | 3849 MACB_BIT(TCOMP) | 3850 MACB_BIT(ISR_ROVR) | 3851 MACB_BIT(HRESP)); 3852 3853 return 0; 3854 } 3855 3856 static void at91ether_stop(struct macb *lp) 3857 { 3858 u32 ctl; 3859 3860 /* Disable MAC interrupts */ 3861 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 3862 MACB_BIT(RXUBR) | 3863 MACB_BIT(ISR_TUND) | 3864 MACB_BIT(ISR_RLE) | 3865 MACB_BIT(TCOMP) | 3866 MACB_BIT(ISR_ROVR) | 3867 MACB_BIT(HRESP)); 3868 3869 /* Disable Receiver and Transmitter */ 3870 ctl = macb_readl(lp, NCR); 3871 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 3872 3873 /* Free resources. 
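 * The receiver and transmitter are already disabled at this point, so the
 * DMA buffers can be released safely.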
*/ 3874 at91ether_free_coherent(lp); 3875 } 3876 3877 /* Open the ethernet interface */ 3878 static int at91ether_open(struct net_device *dev) 3879 { 3880 struct macb *lp = netdev_priv(dev); 3881 u32 ctl; 3882 int ret; 3883 3884 ret = pm_runtime_get_sync(&lp->pdev->dev); 3885 if (ret < 0) { 3886 pm_runtime_put_noidle(&lp->pdev->dev); 3887 return ret; 3888 } 3889 3890 /* Clear internal statistics */ 3891 ctl = macb_readl(lp, NCR); 3892 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 3893 3894 macb_set_hwaddr(lp); 3895 3896 ret = at91ether_start(lp); 3897 if (ret) 3898 goto pm_exit; 3899 3900 ret = macb_phylink_connect(lp); 3901 if (ret) 3902 goto stop; 3903 3904 netif_start_queue(dev); 3905 3906 return 0; 3907 3908 stop: 3909 at91ether_stop(lp); 3910 pm_exit: 3911 pm_runtime_put_sync(&lp->pdev->dev); 3912 return ret; 3913 } 3914 3915 /* Close the interface */ 3916 static int at91ether_close(struct net_device *dev) 3917 { 3918 struct macb *lp = netdev_priv(dev); 3919 3920 netif_stop_queue(dev); 3921 3922 phylink_stop(lp->phylink); 3923 phylink_disconnect_phy(lp->phylink); 3924 3925 at91ether_stop(lp); 3926 3927 return pm_runtime_put(&lp->pdev->dev); 3928 } 3929 3930 /* Transmit packet */ 3931 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, 3932 struct net_device *dev) 3933 { 3934 struct macb *lp = netdev_priv(dev); 3935 3936 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { 3937 netif_stop_queue(dev); 3938 3939 /* Store packet information (to free when Tx completed) */ 3940 lp->skb = skb; 3941 lp->skb_length = skb->len; 3942 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, 3943 skb->len, DMA_TO_DEVICE); 3944 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { 3945 dev_kfree_skb_any(skb); 3946 dev->stats.tx_dropped++; 3947 netdev_err(dev, "%s: DMA mapping error\n", __func__); 3948 return NETDEV_TX_OK; 3949 } 3950 3951 /* Set address of the data in the Transmit Address register */ 3952 macb_writel(lp, TAR, lp->skb_physaddr); 3953 /* Set length of the packet in the Transmit Control register */ 3954 macb_writel(lp, TCR, skb->len); 3955 3956 } else { 3957 netdev_err(dev, "%s called, but device is busy!\n", __func__); 3958 return NETDEV_TX_BUSY; 3959 } 3960 3961 return NETDEV_TX_OK; 3962 } 3963 3964 /* Extract received frames from buffer descriptors and send them to upper layers.

/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(q, q->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}

/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_consume_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
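
/* The RM9200 EMAC is driven from a single "ether_clk"; the remaining clock
 * handles are left NULL so that the clk_prepare_enable()/clk_disable_unprepare()
 * calls in the shared code paths become no-ops for them.
 */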
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk, struct clk **tsu_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;
	*tsu_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));

	return 0;
}

/* The FU540 TX clock is modelled as a simple switch: fu540_macb_tx_set_rate()
 * writes 0 to the GEMGXL management register for 125 MHz (gigabit) operation
 * and 1 otherwise, and round_rate() snaps requests to the 2.5/25/125 MHz
 * rates used for 10/100/1000 Mbit/s links.
 */
static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	return mgmt->rate;
}

static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
	else
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

	return 0;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);

	return macb_init(pdev);
}
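
/* Per-SoC configuration descriptors: each one supplies the capability flags,
 * optional DMA burst length and jumbo frame limit, and the clk_init/init
 * hooks that macb_probe() calls for the matched device.
 */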
static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};
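
/* Device tree match table. Compatibles without a .data pointer (for example
 * "cdns,macb" and "cdns,at32ap7000-macb") keep the default_gem_config that
 * macb_probe() starts from.
 */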
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	bool native_io;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err, val;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	mac = of_get_mac_address(np);
	if (PTR_ERR(mac) == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_out_free_netdev;
	} else if (!IS_ERR_OR_NULL(mac)) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		macb_get_hwaddr(bp);
	}

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}

/* Undo macb_suspend(): when Wake-on-LAN was armed only the WoL interrupt is
 * torn down; otherwise the link is restarted and the registers saved in
 * bp->pm_data (USRIO and the ethertype screener used with NETIF_F_NTUPLE)
 * are written back before the hardware is reinitialized.
 */
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");