// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
					| MACB_BIT(ISR_RLE)	\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

#define HS_SPEED_10000M			4
#define MACB_SERDES_RATE_10G		1

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor might be a different size
 * depending on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
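
/* Concretely (assuming the usual two-word macb_dma_desc_64 and
 * macb_dma_desc_ptp definitions in macb.h), the four layouts above come to
 * 8, 16, 16 and 24 bytes per descriptor.  This is also why
 * macb_adj_dma_desc_idx() below scales a ring index by 2 (one extension) or
 * by 3 (both extensions): an extended descriptor occupies two or three
 * base-sized slots in the ring storage.
 */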
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
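
/* Most of the driver reaches these accessors through bp->macb_reg_readl and
 * bp->macb_reg_writel (see macb_update_stats() further down); presumably the
 * probe path points those at either the __raw (native-endian) or the relaxed
 * variants above depending on the result of hw_is_native_io() below.
 */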

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_read_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_READ)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
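
/* macb_mdio_write() below mirrors the read path above: a Clause 45 access
 * takes two MAN operations (an address cycle followed by the data cycle),
 * while a Clause 22 access needs a single one, and every operation waits for
 * the PHY management logic to go idle, bounded by MACB_MDIO_TIMEOUT.
 */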
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_WRITE)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)
				| MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: Link speed (SPEED_10/100/1000) used to pick the new rate
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
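	/* The deviation is computed below in units of rate / 100000, i.e. in
	 * 10 ppm steps (all rates chosen above are at least 2.5 MHz), so the
	 * warning triggers once the rounded rate is more than 50 ppm away.
	 */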
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_validate(struct phylink_config *config,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct macb *bp = netdev_priv(ndev);

	/* We only support MII, RMII, GMII, RGMII, SGMII & 10GBASE-R. */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_RMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    state->interface != PHY_INTERFACE_MODE_10GBASER &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	if (!macb_is_gem(bp) &&
	    (state->interface == PHY_INTERFACE_MODE_GMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
	    !(bp->caps & MACB_CAPS_HIGH_SPEED &&
	      bp->caps & MACB_CAPS_PCS)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, Asym_Pause);

	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_10GBASER)) {
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseT_Full);
		if (state->interface != PHY_INTERFACE_MODE_NA)
			goto out;
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_GMII ||
	     state->interface == PHY_INTERFACE_MODE_SGMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);

		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			phylink_set(mask, 1000baseT_Half);
	}
out:
	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
				 phy_interface_t interface, int speed,
				 int duplex)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
	u32 config;

	config = gem_readl(bp, USX_CONTROL);
	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
	config |= GEM_BIT(TX_EN);
	gem_writel(bp, USX_CONTROL, config);
}

static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
				   struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = gem_readl(bp, USX_STATUS);
	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
	val = gem_readl(bp, NCFGR);
	if (val & GEM_BIT(PAE))
		state->pause = MLO_PAUSE_RX;
}

static int macb_usx_pcs_config(struct phylink_pcs *pcs,
			       unsigned int mode,
			       phy_interface_t interface,
			       const unsigned long *advertising,
			       bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);

	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
		   GEM_BIT(SIGNAL_OK));

	return 0;
}

static void macb_pcs_get_state(struct phylink_pcs *pcs,
			       struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Not supported */
}

static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
	.pcs_get_state = macb_usx_pcs_get_state,
	.pcs_config = macb_usx_pcs_config,
	.pcs_link_up = macb_usx_pcs_link_up,
};

static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
	.pcs_get_state = macb_pcs_get_state,
	.pcs_an_restart = macb_pcs_an_restart,
};

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;
	u32 old_ncr, ncr;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
	old_ncr = ncr = macb_or_gem_readl(bp, NCR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
		ncr &= ~GEM_BIT(ENABLE_HS_MAC);

		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
			ctrl |= GEM_BIT(PCSSEL);
			ncr |= GEM_BIT(ENABLE_HS_MAC);
		}
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	if (old_ncr ^ ncr)
		macb_or_gem_writel(bp, NCR, ncr);

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		macb_set_tx_clk(bp->tx_clk, speed, ndev);

		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
		 * cleared the pipeline and control registers.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
							gem_readl(bp, HS_MAC_CONFIG)));

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_10GBASER)
		bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
	else
		bp->phylink_pcs.ops = &macb_phylink_pcs_ops;

	phylink_set_pcs(bp->phylink, &bp->phylink_pcs);

	return 0;
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = macb_validate,
	.mac_prepare = macb_mac_prepare,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void
macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 969 { 970 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 971 struct macb_dma_desc_64 *desc_64; 972 973 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 974 desc_64 = macb_64b_desc(bp, desc); 975 desc_64->addrh = upper_32_bits(addr); 976 /* The low bits of RX address contain the RX_USED bit, clearing 977 * of which allows packet RX. Make sure the high bits are also 978 * visible to HW at that point. 979 */ 980 dma_wmb(); 981 } 982 #endif 983 desc->addr = lower_32_bits(addr); 984 } 985 986 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 987 { 988 dma_addr_t addr = 0; 989 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 990 struct macb_dma_desc_64 *desc_64; 991 992 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 993 desc_64 = macb_64b_desc(bp, desc); 994 addr = ((u64)(desc_64->addrh) << 32); 995 } 996 #endif 997 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 998 return addr; 999 } 1000 1001 static void macb_tx_error_task(struct work_struct *work) 1002 { 1003 struct macb_queue *queue = container_of(work, struct macb_queue, 1004 tx_error_task); 1005 struct macb *bp = queue->bp; 1006 struct macb_tx_skb *tx_skb; 1007 struct macb_dma_desc *desc; 1008 struct sk_buff *skb; 1009 unsigned int tail; 1010 unsigned long flags; 1011 1012 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 1013 (unsigned int)(queue - bp->queues), 1014 queue->tx_tail, queue->tx_head); 1015 1016 /* Prevent the queue IRQ handlers from running: each of them may call 1017 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 1018 * As explained below, we have to halt the transmission before updating 1019 * TBQP registers so we call netif_tx_stop_all_queues() to notify the 1020 * network engine about the macb/gem being halted. 1021 */ 1022 spin_lock_irqsave(&bp->lock, flags); 1023 1024 /* Make sure nobody is trying to queue up new packets */ 1025 netif_tx_stop_all_queues(bp->dev); 1026 1027 /* Stop transmission now 1028 * (in case we have just queued new packets) 1029 * macb/gem must be halted to write TBQP register 1030 */ 1031 if (macb_halt_tx(bp)) 1032 /* Just complain for now, reinitializing TX path can be good */ 1033 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 1034 1035 /* Treat frames in TX queue including the ones that caused the error. 1036 * Free transmit buffers in upper layer. 1037 */ 1038 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 1039 u32 ctrl; 1040 1041 desc = macb_tx_desc(queue, tail); 1042 ctrl = desc->ctrl; 1043 tx_skb = macb_tx_skb(queue, tail); 1044 skb = tx_skb->skb; 1045 1046 if (ctrl & MACB_BIT(TX_USED)) { 1047 /* skb is set for the last buffer of the frame */ 1048 while (!skb) { 1049 macb_tx_unmap(bp, tx_skb); 1050 tail++; 1051 tx_skb = macb_tx_skb(queue, tail); 1052 skb = tx_skb->skb; 1053 } 1054 1055 /* ctrl still refers to the first buffer descriptor 1056 * since it's the only one written back by the hardware 1057 */ 1058 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { 1059 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", 1060 macb_tx_ring_wrap(bp, tail), 1061 skb->data); 1062 bp->dev->stats.tx_packets++; 1063 queue->stats.tx_packets++; 1064 bp->dev->stats.tx_bytes += skb->len; 1065 queue->stats.tx_bytes += skb->len; 1066 } 1067 } else { 1068 /* "Buffers exhausted mid-frame" errors may only happen 1069 * if the driver is buggy, so complain loudly about 1070 * those. Statistics are updated by hardware. 
1071 */ 1072 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 1073 netdev_err(bp->dev, 1074 "BUG: TX buffers exhausted mid-frame\n"); 1075 1076 desc->ctrl = ctrl | MACB_BIT(TX_USED); 1077 } 1078 1079 macb_tx_unmap(bp, tx_skb); 1080 } 1081 1082 /* Set end of TX queue */ 1083 desc = macb_tx_desc(queue, 0); 1084 macb_set_addr(bp, desc, 0); 1085 desc->ctrl = MACB_BIT(TX_USED); 1086 1087 /* Make descriptor updates visible to hardware */ 1088 wmb(); 1089 1090 /* Reinitialize the TX desc queue */ 1091 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); 1092 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1093 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 1094 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); 1095 #endif 1096 /* Make TX ring reflect state of hardware */ 1097 queue->tx_head = 0; 1098 queue->tx_tail = 0; 1099 1100 /* Housework before enabling TX IRQ */ 1101 macb_writel(bp, TSR, macb_readl(bp, TSR)); 1102 queue_writel(queue, IER, MACB_TX_INT_FLAGS); 1103 1104 /* Now we are ready to start transmission again */ 1105 netif_tx_start_all_queues(bp->dev); 1106 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1107 1108 spin_unlock_irqrestore(&bp->lock, flags); 1109 } 1110 1111 static void macb_tx_interrupt(struct macb_queue *queue) 1112 { 1113 unsigned int tail; 1114 unsigned int head; 1115 u32 status; 1116 struct macb *bp = queue->bp; 1117 u16 queue_index = queue - bp->queues; 1118 1119 status = macb_readl(bp, TSR); 1120 macb_writel(bp, TSR, status); 1121 1122 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1123 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 1124 1125 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 1126 (unsigned long)status); 1127 1128 head = queue->tx_head; 1129 for (tail = queue->tx_tail; tail != head; tail++) { 1130 struct macb_tx_skb *tx_skb; 1131 struct sk_buff *skb; 1132 struct macb_dma_desc *desc; 1133 u32 ctrl; 1134 1135 desc = macb_tx_desc(queue, tail); 1136 1137 /* Make hw descriptor updates visible to CPU */ 1138 rmb(); 1139 1140 ctrl = desc->ctrl; 1141 1142 /* TX_USED bit is only set by hardware on the very first buffer 1143 * descriptor of the transmitted frame. 1144 */ 1145 if (!(ctrl & MACB_BIT(TX_USED))) 1146 break; 1147 1148 /* Process all buffers of the current transmitted frame */ 1149 for (;; tail++) { 1150 tx_skb = macb_tx_skb(queue, tail); 1151 skb = tx_skb->skb; 1152 1153 /* First, update TX stats if needed */ 1154 if (skb) { 1155 if (unlikely(skb_shinfo(skb)->tx_flags & 1156 SKBTX_HW_TSTAMP) && 1157 gem_ptp_do_txstamp(queue, skb, desc) == 0) { 1158 /* skb now belongs to timestamp buffer 1159 * and will be removed later 1160 */ 1161 tx_skb->skb = NULL; 1162 } 1163 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", 1164 macb_tx_ring_wrap(bp, tail), 1165 skb->data); 1166 bp->dev->stats.tx_packets++; 1167 queue->stats.tx_packets++; 1168 bp->dev->stats.tx_bytes += skb->len; 1169 queue->stats.tx_bytes += skb->len; 1170 } 1171 1172 /* Now we can safely release resources */ 1173 macb_tx_unmap(bp, tx_skb); 1174 1175 /* skb is set only for the last buffer of the frame. 1176 * WARNING: at this point skb has been freed by 1177 * macb_tx_unmap(). 
1178 */ 1179 if (skb) 1180 break; 1181 } 1182 } 1183 1184 queue->tx_tail = tail; 1185 if (__netif_subqueue_stopped(bp->dev, queue_index) && 1186 CIRC_CNT(queue->tx_head, queue->tx_tail, 1187 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) 1188 netif_wake_subqueue(bp->dev, queue_index); 1189 } 1190 1191 static void gem_rx_refill(struct macb_queue *queue) 1192 { 1193 unsigned int entry; 1194 struct sk_buff *skb; 1195 dma_addr_t paddr; 1196 struct macb *bp = queue->bp; 1197 struct macb_dma_desc *desc; 1198 1199 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, 1200 bp->rx_ring_size) > 0) { 1201 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); 1202 1203 /* Make hw descriptor updates visible to CPU */ 1204 rmb(); 1205 1206 queue->rx_prepared_head++; 1207 desc = macb_rx_desc(queue, entry); 1208 1209 if (!queue->rx_skbuff[entry]) { 1210 /* allocate sk_buff for this free entry in ring */ 1211 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 1212 if (unlikely(!skb)) { 1213 netdev_err(bp->dev, 1214 "Unable to allocate sk_buff\n"); 1215 break; 1216 } 1217 1218 /* now fill corresponding descriptor entry */ 1219 paddr = dma_map_single(&bp->pdev->dev, skb->data, 1220 bp->rx_buffer_size, 1221 DMA_FROM_DEVICE); 1222 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 1223 dev_kfree_skb(skb); 1224 break; 1225 } 1226 1227 queue->rx_skbuff[entry] = skb; 1228 1229 if (entry == bp->rx_ring_size - 1) 1230 paddr |= MACB_BIT(RX_WRAP); 1231 desc->ctrl = 0; 1232 /* Setting addr clears RX_USED and allows reception, 1233 * make sure ctrl is cleared first to avoid a race. 1234 */ 1235 dma_wmb(); 1236 macb_set_addr(bp, desc, paddr); 1237 1238 /* properly align Ethernet header */ 1239 skb_reserve(skb, NET_IP_ALIGN); 1240 } else { 1241 desc->ctrl = 0; 1242 dma_wmb(); 1243 desc->addr &= ~MACB_BIT(RX_USED); 1244 } 1245 } 1246 1247 /* Make descriptor updates visible to hardware */ 1248 wmb(); 1249 1250 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", 1251 queue, queue->rx_prepared_head, queue->rx_tail); 1252 } 1253 1254 /* Mark DMA descriptors from begin up to and not including end as unused */ 1255 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, 1256 unsigned int end) 1257 { 1258 unsigned int frag; 1259 1260 for (frag = begin; frag != end; frag++) { 1261 struct macb_dma_desc *desc = macb_rx_desc(queue, frag); 1262 1263 desc->addr &= ~MACB_BIT(RX_USED); 1264 } 1265 1266 /* Make descriptor updates visible to hardware */ 1267 wmb(); 1268 1269 /* When this happens, the hardware stats registers for 1270 * whatever caused this is updated, so we don't have to record 1271 * anything. 1272 */ 1273 } 1274 1275 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi, 1276 int budget) 1277 { 1278 struct macb *bp = queue->bp; 1279 unsigned int len; 1280 unsigned int entry; 1281 struct sk_buff *skb; 1282 struct macb_dma_desc *desc; 1283 int count = 0; 1284 1285 while (count < budget) { 1286 u32 ctrl; 1287 dma_addr_t addr; 1288 bool rxused; 1289 1290 entry = macb_rx_ring_wrap(bp, queue->rx_tail); 1291 desc = macb_rx_desc(queue, entry); 1292 1293 /* Make hw descriptor updates visible to CPU */ 1294 rmb(); 1295 1296 rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; 1297 addr = macb_get_addr(bp, desc); 1298 1299 if (!rxused) 1300 break; 1301 1302 /* Ensure ctrl is at least as up-to-date as rxused */ 1303 dma_rmb(); 1304 1305 ctrl = desc->ctrl; 1306 1307 queue->rx_tail++; 1308 count++; 1309 1310 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { 1311 netdev_err(bp->dev, 1312 "not whole frame pointed by descriptor\n"); 1313 bp->dev->stats.rx_dropped++; 1314 queue->stats.rx_dropped++; 1315 break; 1316 } 1317 skb = queue->rx_skbuff[entry]; 1318 if (unlikely(!skb)) { 1319 netdev_err(bp->dev, 1320 "inconsistent Rx descriptor chain\n"); 1321 bp->dev->stats.rx_dropped++; 1322 queue->stats.rx_dropped++; 1323 break; 1324 } 1325 /* now everything is ready for receiving packet */ 1326 queue->rx_skbuff[entry] = NULL; 1327 len = ctrl & bp->rx_frm_len_mask; 1328 1329 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); 1330 1331 skb_put(skb, len); 1332 dma_unmap_single(&bp->pdev->dev, addr, 1333 bp->rx_buffer_size, DMA_FROM_DEVICE); 1334 1335 skb->protocol = eth_type_trans(skb, bp->dev); 1336 skb_checksum_none_assert(skb); 1337 if (bp->dev->features & NETIF_F_RXCSUM && 1338 !(bp->dev->flags & IFF_PROMISC) && 1339 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) 1340 skb->ip_summed = CHECKSUM_UNNECESSARY; 1341 1342 bp->dev->stats.rx_packets++; 1343 queue->stats.rx_packets++; 1344 bp->dev->stats.rx_bytes += skb->len; 1345 queue->stats.rx_bytes += skb->len; 1346 1347 gem_ptp_do_rxstamp(bp, skb, desc); 1348 1349 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1350 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1351 skb->len, skb->csum); 1352 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, 1353 skb_mac_header(skb), 16, true); 1354 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, 1355 skb->data, 32, true); 1356 #endif 1357 1358 napi_gro_receive(napi, skb); 1359 } 1360 1361 gem_rx_refill(queue); 1362 1363 return count; 1364 } 1365 1366 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, 1367 unsigned int first_frag, unsigned int last_frag) 1368 { 1369 unsigned int len; 1370 unsigned int frag; 1371 unsigned int offset; 1372 struct sk_buff *skb; 1373 struct macb_dma_desc *desc; 1374 struct macb *bp = queue->bp; 1375 1376 desc = macb_rx_desc(queue, last_frag); 1377 len = desc->ctrl & bp->rx_frm_len_mask; 1378 1379 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 1380 macb_rx_ring_wrap(bp, first_frag), 1381 macb_rx_ring_wrap(bp, last_frag), len); 1382 1383 /* The ethernet header starts NET_IP_ALIGN bytes into the 1384 * first buffer. Since the header is 14 bytes, this makes the 1385 * payload word-aligned. 1386 * 1387 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy 1388 * the two padding bytes into the skb so that we avoid hitting 1389 * the slowpath in memcpy(), and pull them off afterwards. 
1390 */ 1391 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); 1392 if (!skb) { 1393 bp->dev->stats.rx_dropped++; 1394 for (frag = first_frag; ; frag++) { 1395 desc = macb_rx_desc(queue, frag); 1396 desc->addr &= ~MACB_BIT(RX_USED); 1397 if (frag == last_frag) 1398 break; 1399 } 1400 1401 /* Make descriptor updates visible to hardware */ 1402 wmb(); 1403 1404 return 1; 1405 } 1406 1407 offset = 0; 1408 len += NET_IP_ALIGN; 1409 skb_checksum_none_assert(skb); 1410 skb_put(skb, len); 1411 1412 for (frag = first_frag; ; frag++) { 1413 unsigned int frag_len = bp->rx_buffer_size; 1414 1415 if (offset + frag_len > len) { 1416 if (unlikely(frag != last_frag)) { 1417 dev_kfree_skb_any(skb); 1418 return -1; 1419 } 1420 frag_len = len - offset; 1421 } 1422 skb_copy_to_linear_data_offset(skb, offset, 1423 macb_rx_buffer(queue, frag), 1424 frag_len); 1425 offset += bp->rx_buffer_size; 1426 desc = macb_rx_desc(queue, frag); 1427 desc->addr &= ~MACB_BIT(RX_USED); 1428 1429 if (frag == last_frag) 1430 break; 1431 } 1432 1433 /* Make descriptor updates visible to hardware */ 1434 wmb(); 1435 1436 __skb_pull(skb, NET_IP_ALIGN); 1437 skb->protocol = eth_type_trans(skb, bp->dev); 1438 1439 bp->dev->stats.rx_packets++; 1440 bp->dev->stats.rx_bytes += skb->len; 1441 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 1442 skb->len, skb->csum); 1443 napi_gro_receive(napi, skb); 1444 1445 return 0; 1446 } 1447 1448 static inline void macb_init_rx_ring(struct macb_queue *queue) 1449 { 1450 struct macb *bp = queue->bp; 1451 dma_addr_t addr; 1452 struct macb_dma_desc *desc = NULL; 1453 int i; 1454 1455 addr = queue->rx_buffers_dma; 1456 for (i = 0; i < bp->rx_ring_size; i++) { 1457 desc = macb_rx_desc(queue, i); 1458 macb_set_addr(bp, desc, addr); 1459 desc->ctrl = 0; 1460 addr += bp->rx_buffer_size; 1461 } 1462 desc->addr |= MACB_BIT(RX_WRAP); 1463 queue->rx_tail = 0; 1464 } 1465 1466 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, 1467 int budget) 1468 { 1469 struct macb *bp = queue->bp; 1470 bool reset_rx_queue = false; 1471 int received = 0; 1472 unsigned int tail; 1473 int first_frag = -1; 1474 1475 for (tail = queue->rx_tail; budget > 0; tail++) { 1476 struct macb_dma_desc *desc = macb_rx_desc(queue, tail); 1477 u32 ctrl; 1478 1479 /* Make hw descriptor updates visible to CPU */ 1480 rmb(); 1481 1482 if (!(desc->addr & MACB_BIT(RX_USED))) 1483 break; 1484 1485 /* Ensure ctrl is at least as up-to-date as addr */ 1486 dma_rmb(); 1487 1488 ctrl = desc->ctrl; 1489 1490 if (ctrl & MACB_BIT(RX_SOF)) { 1491 if (first_frag != -1) 1492 discard_partial_frame(queue, first_frag, tail); 1493 first_frag = tail; 1494 } 1495 1496 if (ctrl & MACB_BIT(RX_EOF)) { 1497 int dropped; 1498 1499 if (unlikely(first_frag == -1)) { 1500 reset_rx_queue = true; 1501 continue; 1502 } 1503 1504 dropped = macb_rx_frame(queue, napi, first_frag, tail); 1505 first_frag = -1; 1506 if (unlikely(dropped < 0)) { 1507 reset_rx_queue = true; 1508 continue; 1509 } 1510 if (!dropped) { 1511 received++; 1512 budget--; 1513 } 1514 } 1515 } 1516 1517 if (unlikely(reset_rx_queue)) { 1518 unsigned long flags; 1519 u32 ctrl; 1520 1521 netdev_err(bp->dev, "RX queue corruption: reset it\n"); 1522 1523 spin_lock_irqsave(&bp->lock, flags); 1524 1525 ctrl = macb_readl(bp, NCR); 1526 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1527 1528 macb_init_rx_ring(queue); 1529 queue_writel(queue, RBQP, queue->rx_ring_dma); 1530 1531 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1532 1533 spin_unlock_irqrestore(&bp->lock, flags); 1534 
return received; 1535 } 1536 1537 if (first_frag != -1) 1538 queue->rx_tail = first_frag; 1539 else 1540 queue->rx_tail = tail; 1541 1542 return received; 1543 } 1544 1545 static int macb_poll(struct napi_struct *napi, int budget) 1546 { 1547 struct macb_queue *queue = container_of(napi, struct macb_queue, napi); 1548 struct macb *bp = queue->bp; 1549 int work_done; 1550 u32 status; 1551 1552 status = macb_readl(bp, RSR); 1553 macb_writel(bp, RSR, status); 1554 1555 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1556 (unsigned long)status, budget); 1557 1558 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); 1559 if (work_done < budget) { 1560 napi_complete_done(napi, work_done); 1561 1562 /* Packets received while interrupts were disabled */ 1563 status = macb_readl(bp, RSR); 1564 if (status) { 1565 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1566 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1567 napi_reschedule(napi); 1568 } else { 1569 queue_writel(queue, IER, bp->rx_intr_mask); 1570 } 1571 } 1572 1573 /* TODO: Handle errors */ 1574 1575 return work_done; 1576 } 1577 1578 static void macb_hresp_error_task(struct tasklet_struct *t) 1579 { 1580 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); 1581 struct net_device *dev = bp->dev; 1582 struct macb_queue *queue; 1583 unsigned int q; 1584 u32 ctrl; 1585 1586 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1587 queue_writel(queue, IDR, bp->rx_intr_mask | 1588 MACB_TX_INT_FLAGS | 1589 MACB_BIT(HRESP)); 1590 } 1591 ctrl = macb_readl(bp, NCR); 1592 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 1593 macb_writel(bp, NCR, ctrl); 1594 1595 netif_tx_stop_all_queues(dev); 1596 netif_carrier_off(dev); 1597 1598 bp->macbgem_ops.mog_init_rings(bp); 1599 1600 /* Initialize TX and RX buffers */ 1601 macb_init_buffers(bp); 1602 1603 /* Enable interrupts */ 1604 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1605 queue_writel(queue, IER, 1606 bp->rx_intr_mask | 1607 MACB_TX_INT_FLAGS | 1608 MACB_BIT(HRESP)); 1609 1610 ctrl |= MACB_BIT(RE) | MACB_BIT(TE); 1611 macb_writel(bp, NCR, ctrl); 1612 1613 netif_carrier_on(dev); 1614 netif_tx_start_all_queues(dev); 1615 } 1616 1617 static void macb_tx_restart(struct macb_queue *queue) 1618 { 1619 unsigned int head = queue->tx_head; 1620 unsigned int tail = queue->tx_tail; 1621 struct macb *bp = queue->bp; 1622 1623 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1624 queue_writel(queue, ISR, MACB_BIT(TXUBR)); 1625 1626 if (head == tail) 1627 return; 1628 1629 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1630 } 1631 1632 static irqreturn_t macb_wol_interrupt(int irq, void *dev_id) 1633 { 1634 struct macb_queue *queue = dev_id; 1635 struct macb *bp = queue->bp; 1636 u32 status; 1637 1638 status = queue_readl(queue, ISR); 1639 1640 if (unlikely(!status)) 1641 return IRQ_NONE; 1642 1643 spin_lock(&bp->lock); 1644 1645 if (status & MACB_BIT(WOL)) { 1646 queue_writel(queue, IDR, MACB_BIT(WOL)); 1647 macb_writel(bp, WOL, 0); 1648 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", 1649 (unsigned int)(queue - bp->queues), 1650 (unsigned long)status); 1651 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1652 queue_writel(queue, ISR, MACB_BIT(WOL)); 1653 pm_wakeup_event(&bp->pdev->dev, 0); 1654 } 1655 1656 spin_unlock(&bp->lock); 1657 1658 return IRQ_HANDLED; 1659 } 1660 1661 static irqreturn_t gem_wol_interrupt(int irq, void *dev_id) 1662 { 1663 struct macb_queue *queue = dev_id; 1664 struct macb *bp = queue->bp; 1665 u32 status; 1666 1667 status 
= queue_readl(queue, ISR); 1668 1669 if (unlikely(!status)) 1670 return IRQ_NONE; 1671 1672 spin_lock(&bp->lock); 1673 1674 if (status & GEM_BIT(WOL)) { 1675 queue_writel(queue, IDR, GEM_BIT(WOL)); 1676 gem_writel(bp, WOL, 0); 1677 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", 1678 (unsigned int)(queue - bp->queues), 1679 (unsigned long)status); 1680 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1681 queue_writel(queue, ISR, GEM_BIT(WOL)); 1682 pm_wakeup_event(&bp->pdev->dev, 0); 1683 } 1684 1685 spin_unlock(&bp->lock); 1686 1687 return IRQ_HANDLED; 1688 } 1689 1690 static irqreturn_t macb_interrupt(int irq, void *dev_id) 1691 { 1692 struct macb_queue *queue = dev_id; 1693 struct macb *bp = queue->bp; 1694 struct net_device *dev = bp->dev; 1695 u32 status, ctrl; 1696 1697 status = queue_readl(queue, ISR); 1698 1699 if (unlikely(!status)) 1700 return IRQ_NONE; 1701 1702 spin_lock(&bp->lock); 1703 1704 while (status) { 1705 /* close possible race with dev_close */ 1706 if (unlikely(!netif_running(dev))) { 1707 queue_writel(queue, IDR, -1); 1708 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1709 queue_writel(queue, ISR, -1); 1710 break; 1711 } 1712 1713 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 1714 (unsigned int)(queue - bp->queues), 1715 (unsigned long)status); 1716 1717 if (status & bp->rx_intr_mask) { 1718 /* There's no point taking any more interrupts 1719 * until we have processed the buffers. The 1720 * scheduling call may fail if the poll routine 1721 * is already scheduled, so disable interrupts 1722 * now. 1723 */ 1724 queue_writel(queue, IDR, bp->rx_intr_mask); 1725 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1726 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1727 1728 if (napi_schedule_prep(&queue->napi)) { 1729 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 1730 __napi_schedule(&queue->napi); 1731 } 1732 } 1733 1734 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1735 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 1736 schedule_work(&queue->tx_error_task); 1737 1738 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1739 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1740 1741 break; 1742 } 1743 1744 if (status & MACB_BIT(TCOMP)) 1745 macb_tx_interrupt(queue); 1746 1747 if (status & MACB_BIT(TXUBR)) 1748 macb_tx_restart(queue); 1749 1750 /* Link change detection isn't possible with RMII, so we'll 1751 * add that if/when we get our hands on a full-blown MII PHY. 1752 */ 1753 1754 /* There is a hardware issue under heavy load where DMA can 1755 * stop, this causes endless "used buffer descriptor read" 1756 * interrupts but it can be cleared by re-enabling RX. See 1757 * the at91rm9200 manual, section 41.3.1 or the Zynq manual 1758 * section 16.7.4 for details. RXUBR is only enabled for 1759 * these two versions. 
1760 */ 1761 if (status & MACB_BIT(RXUBR)) { 1762 ctrl = macb_readl(bp, NCR); 1763 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1764 wmb(); 1765 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1766 1767 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1768 queue_writel(queue, ISR, MACB_BIT(RXUBR)); 1769 } 1770 1771 if (status & MACB_BIT(ISR_ROVR)) { 1772 /* We missed at least one packet */ 1773 if (macb_is_gem(bp)) 1774 bp->hw_stats.gem.rx_overruns++; 1775 else 1776 bp->hw_stats.macb.rx_overruns++; 1777 1778 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1779 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1780 } 1781 1782 if (status & MACB_BIT(HRESP)) { 1783 tasklet_schedule(&bp->hresp_err_tasklet); 1784 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1785 1786 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1787 queue_writel(queue, ISR, MACB_BIT(HRESP)); 1788 } 1789 status = queue_readl(queue, ISR); 1790 } 1791 1792 spin_unlock(&bp->lock); 1793 1794 return IRQ_HANDLED; 1795 } 1796 1797 #ifdef CONFIG_NET_POLL_CONTROLLER 1798 /* Polling receive - used by netconsole and other diagnostic tools 1799 * to allow network i/o with interrupts disabled. 1800 */ 1801 static void macb_poll_controller(struct net_device *dev) 1802 { 1803 struct macb *bp = netdev_priv(dev); 1804 struct macb_queue *queue; 1805 unsigned long flags; 1806 unsigned int q; 1807 1808 local_irq_save(flags); 1809 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1810 macb_interrupt(dev->irq, queue); 1811 local_irq_restore(flags); 1812 } 1813 #endif 1814 1815 static unsigned int macb_tx_map(struct macb *bp, 1816 struct macb_queue *queue, 1817 struct sk_buff *skb, 1818 unsigned int hdrlen) 1819 { 1820 dma_addr_t mapping; 1821 unsigned int len, entry, i, tx_head = queue->tx_head; 1822 struct macb_tx_skb *tx_skb = NULL; 1823 struct macb_dma_desc *desc; 1824 unsigned int offset, size, count = 0; 1825 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1826 unsigned int eof = 1, mss_mfs = 0; 1827 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 1828 1829 /* LSO */ 1830 if (skb_shinfo(skb)->gso_size != 0) { 1831 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1832 /* UDP - UFO */ 1833 lso_ctrl = MACB_LSO_UFO_ENABLE; 1834 else 1835 /* TCP - TSO */ 1836 lso_ctrl = MACB_LSO_TSO_ENABLE; 1837 } 1838 1839 /* First, map non-paged data */ 1840 len = skb_headlen(skb); 1841 1842 /* first buffer length */ 1843 size = hdrlen; 1844 1845 offset = 0; 1846 while (len) { 1847 entry = macb_tx_ring_wrap(bp, tx_head); 1848 tx_skb = &queue->tx_skb[entry]; 1849 1850 mapping = dma_map_single(&bp->pdev->dev, 1851 skb->data + offset, 1852 size, DMA_TO_DEVICE); 1853 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1854 goto dma_error; 1855 1856 /* Save info to properly release resources */ 1857 tx_skb->skb = NULL; 1858 tx_skb->mapping = mapping; 1859 tx_skb->size = size; 1860 tx_skb->mapped_as_page = false; 1861 1862 len -= size; 1863 offset += size; 1864 count++; 1865 tx_head++; 1866 1867 size = min(len, bp->max_tx_length); 1868 } 1869 1870 /* Then, map paged data from fragments */ 1871 for (f = 0; f < nr_frags; f++) { 1872 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 1873 1874 len = skb_frag_size(frag); 1875 offset = 0; 1876 while (len) { 1877 size = min(len, bp->max_tx_length); 1878 entry = macb_tx_ring_wrap(bp, tx_head); 1879 tx_skb = &queue->tx_skb[entry]; 1880 1881 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1882 offset, size, DMA_TO_DEVICE); 1883 if (dma_mapping_error(&bp->pdev->dev, mapping)) 1884 goto dma_error; 1885 1886 /* Save info to properly release 
resources */ 1887 tx_skb->skb = NULL; 1888 tx_skb->mapping = mapping; 1889 tx_skb->size = size; 1890 tx_skb->mapped_as_page = true; 1891 1892 len -= size; 1893 offset += size; 1894 count++; 1895 tx_head++; 1896 } 1897 } 1898 1899 /* Should never happen */ 1900 if (unlikely(!tx_skb)) { 1901 netdev_err(bp->dev, "BUG! empty skb!\n"); 1902 return 0; 1903 } 1904 1905 /* This is the last buffer of the frame: save socket buffer */ 1906 tx_skb->skb = skb; 1907 1908 /* Update TX ring: update buffer descriptors in reverse order 1909 * to avoid race condition 1910 */ 1911 1912 /* Set 'TX_USED' bit in buffer descriptor at tx_head position 1913 * to set the end of TX queue 1914 */ 1915 i = tx_head; 1916 entry = macb_tx_ring_wrap(bp, i); 1917 ctrl = MACB_BIT(TX_USED); 1918 desc = macb_tx_desc(queue, entry); 1919 desc->ctrl = ctrl; 1920 1921 if (lso_ctrl) { 1922 if (lso_ctrl == MACB_LSO_UFO_ENABLE) 1923 /* include header and FCS in value given to h/w */ 1924 mss_mfs = skb_shinfo(skb)->gso_size + 1925 skb_transport_offset(skb) + 1926 ETH_FCS_LEN; 1927 else /* TSO */ { 1928 mss_mfs = skb_shinfo(skb)->gso_size; 1929 /* TCP Sequence Number Source Select 1930 * can be set only for TSO 1931 */ 1932 seq_ctrl = 0; 1933 } 1934 } 1935 1936 do { 1937 i--; 1938 entry = macb_tx_ring_wrap(bp, i); 1939 tx_skb = &queue->tx_skb[entry]; 1940 desc = macb_tx_desc(queue, entry); 1941 1942 ctrl = (u32)tx_skb->size; 1943 if (eof) { 1944 ctrl |= MACB_BIT(TX_LAST); 1945 eof = 0; 1946 } 1947 if (unlikely(entry == (bp->tx_ring_size - 1))) 1948 ctrl |= MACB_BIT(TX_WRAP); 1949 1950 /* First descriptor is header descriptor */ 1951 if (i == queue->tx_head) { 1952 ctrl |= MACB_BF(TX_LSO, lso_ctrl); 1953 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); 1954 if ((bp->dev->features & NETIF_F_HW_CSUM) && 1955 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) 1956 ctrl |= MACB_BIT(TX_NOCRC); 1957 } else 1958 /* Only set MSS/MFS on payload descriptors 1959 * (second or later descriptor) 1960 */ 1961 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1962 1963 /* Set TX buffer descriptor */ 1964 macb_set_addr(bp, desc, tx_skb->mapping); 1965 /* desc->addr must be visible to hardware before clearing 1966 * 'TX_USED' bit in desc->ctrl. 1967 */ 1968 wmb(); 1969 desc->ctrl = ctrl; 1970 } while (i != queue->tx_head); 1971 1972 queue->tx_head = tx_head; 1973 1974 return count; 1975 1976 dma_error: 1977 netdev_err(bp->dev, "TX DMA map failed\n"); 1978 1979 for (i = queue->tx_head; i != tx_head; i++) { 1980 tx_skb = macb_tx_skb(queue, i); 1981 1982 macb_tx_unmap(bp, tx_skb); 1983 } 1984 1985 return 0; 1986 } 1987 1988 static netdev_features_t macb_features_check(struct sk_buff *skb, 1989 struct net_device *dev, 1990 netdev_features_t features) 1991 { 1992 unsigned int nr_frags, f; 1993 unsigned int hdrlen; 1994 1995 /* Validate LSO compatibility */ 1996 1997 /* there is only one buffer or protocol is not UDP */ 1998 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) 1999 return features; 2000 2001 /* length of header */ 2002 hdrlen = skb_transport_offset(skb); 2003 2004 /* For UFO only: 2005 * When software supplies two or more payload buffers all payload buffers 2006 * apart from the last must be a multiple of 8 bytes in size. 
2007 */ 2008 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) 2009 return features & ~MACB_NETIF_LSO; 2010 2011 nr_frags = skb_shinfo(skb)->nr_frags; 2012 /* No need to check last fragment */ 2013 nr_frags--; 2014 for (f = 0; f < nr_frags; f++) { 2015 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 2016 2017 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) 2018 return features & ~MACB_NETIF_LSO; 2019 } 2020 return features; 2021 } 2022 2023 static inline int macb_clear_csum(struct sk_buff *skb) 2024 { 2025 /* no change for packets without checksum offloading */ 2026 if (skb->ip_summed != CHECKSUM_PARTIAL) 2027 return 0; 2028 2029 /* make sure we can modify the header */ 2030 if (unlikely(skb_cow_head(skb, 0))) 2031 return -1; 2032 2033 /* initialize checksum field 2034 * This is required - at least for Zynq, which otherwise calculates 2035 * wrong UDP header checksums for UDP packets with UDP data len <=2 2036 */ 2037 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; 2038 return 0; 2039 } 2040 2041 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) 2042 { 2043 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); 2044 int padlen = ETH_ZLEN - (*skb)->len; 2045 int headroom = skb_headroom(*skb); 2046 int tailroom = skb_tailroom(*skb); 2047 struct sk_buff *nskb; 2048 u32 fcs; 2049 2050 if (!(ndev->features & NETIF_F_HW_CSUM) || 2051 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || 2052 skb_shinfo(*skb)->gso_size) /* Not available for GSO */ 2053 return 0; 2054 2055 if (padlen <= 0) { 2056 /* FCS could be appended to tailroom. */ 2057 if (tailroom >= ETH_FCS_LEN) 2058 goto add_fcs; 2059 /* FCS could be appended by moving data to headroom. */ 2060 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 2061 padlen = 0; 2062 /* No room for FCS, need to reallocate skb. */ 2063 else 2064 padlen = ETH_FCS_LEN; 2065 } else { 2066 /* Add room for FCS.
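 * A worked example: a 42-byte frame is ETH_ZLEN - 42 = 18 bytes short of the
 * minimum, so padlen starts at 18; adding ETH_FCS_LEN below makes room for
 * those 18 bytes of zero padding plus the 4 FCS bytes appended at add_fcs.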
*/ 2067 padlen += ETH_FCS_LEN; 2068 } 2069 2070 if (!cloned && headroom + tailroom >= padlen) { 2071 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 2072 skb_set_tail_pointer(*skb, (*skb)->len); 2073 } else { 2074 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 2075 if (!nskb) 2076 return -ENOMEM; 2077 2078 dev_consume_skb_any(*skb); 2079 *skb = nskb; 2080 } 2081 2082 if (padlen > ETH_FCS_LEN) 2083 skb_put_zero(*skb, padlen - ETH_FCS_LEN); 2084 2085 add_fcs: 2086 /* set FCS to packet */ 2087 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); 2088 fcs = ~fcs; 2089 2090 skb_put_u8(*skb, fcs & 0xff); 2091 skb_put_u8(*skb, (fcs >> 8) & 0xff); 2092 skb_put_u8(*skb, (fcs >> 16) & 0xff); 2093 skb_put_u8(*skb, (fcs >> 24) & 0xff); 2094 2095 return 0; 2096 } 2097 2098 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 2099 { 2100 u16 queue_index = skb_get_queue_mapping(skb); 2101 struct macb *bp = netdev_priv(dev); 2102 struct macb_queue *queue = &bp->queues[queue_index]; 2103 unsigned long flags; 2104 unsigned int desc_cnt, nr_frags, frag_size, f; 2105 unsigned int hdrlen; 2106 bool is_lso; 2107 netdev_tx_t ret = NETDEV_TX_OK; 2108 2109 if (macb_clear_csum(skb)) { 2110 dev_kfree_skb_any(skb); 2111 return ret; 2112 } 2113 2114 if (macb_pad_and_fcs(&skb, dev)) { 2115 dev_kfree_skb_any(skb); 2116 return ret; 2117 } 2118 2119 is_lso = (skb_shinfo(skb)->gso_size != 0); 2120 2121 if (is_lso) { 2122 /* length of headers */ 2123 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 2124 /* only queue eth + ip headers separately for UDP */ 2125 hdrlen = skb_transport_offset(skb); 2126 else 2127 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); 2128 if (skb_headlen(skb) < hdrlen) { 2129 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); 2130 /* if this is required, would need to copy to single buffer */ 2131 return NETDEV_TX_BUSY; 2132 } 2133 } else 2134 hdrlen = min(skb_headlen(skb), bp->max_tx_length); 2135 2136 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 2137 netdev_vdbg(bp->dev, 2138 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 2139 queue_index, skb->len, skb->head, skb->data, 2140 skb_tail_pointer(skb), skb_end_pointer(skb)); 2141 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 2142 skb->data, 16, true); 2143 #endif 2144 2145 /* Count how many TX buffer descriptors are needed to send this 2146 * socket buffer: skb fragments of jumbo frames may need to be 2147 * split into many buffer descriptors. 2148 */ 2149 if (is_lso && (skb_headlen(skb) > hdrlen)) 2150 /* extra header descriptor if also payload in first buffer */ 2151 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; 2152 else 2153 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 2154 nr_frags = skb_shinfo(skb)->nr_frags; 2155 for (f = 0; f < nr_frags; f++) { 2156 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 2157 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); 2158 } 2159 2160 spin_lock_irqsave(&bp->lock, flags); 2161 2162 /* This is a hard error, log it. 
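 * With the default 512-entry TX ring this should be rare in practice: e.g. a
 * non-LSO skb with a 1500-byte linear area and two page-sized fragments only
 * needs 1 + 1 + 1 = 3 descriptors when max_tx_length is GEM_MAX_TX_LEN, so
 * the ring has to be nearly full before the queue is stopped here.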
*/ 2163 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, 2164 bp->tx_ring_size) < desc_cnt) { 2165 netif_stop_subqueue(dev, queue_index); 2166 spin_unlock_irqrestore(&bp->lock, flags); 2167 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 2168 queue->tx_head, queue->tx_tail); 2169 return NETDEV_TX_BUSY; 2170 } 2171 2172 /* Map socket buffer for DMA transfer */ 2173 if (!macb_tx_map(bp, queue, skb, hdrlen)) { 2174 dev_kfree_skb_any(skb); 2175 goto unlock; 2176 } 2177 2178 /* Make newly initialized descriptor visible to hardware */ 2179 wmb(); 2180 skb_tx_timestamp(skb); 2181 2182 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 2183 2184 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) 2185 netif_stop_subqueue(dev, queue_index); 2186 2187 unlock: 2188 spin_unlock_irqrestore(&bp->lock, flags); 2189 2190 return ret; 2191 } 2192 2193 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) 2194 { 2195 if (!macb_is_gem(bp)) { 2196 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; 2197 } else { 2198 bp->rx_buffer_size = size; 2199 2200 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 2201 netdev_dbg(bp->dev, 2202 "RX buffer must be multiple of %d bytes, expanding\n", 2203 RX_BUFFER_MULTIPLE); 2204 bp->rx_buffer_size = 2205 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 2206 } 2207 } 2208 2209 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", 2210 bp->dev->mtu, bp->rx_buffer_size); 2211 } 2212 2213 static void gem_free_rx_buffers(struct macb *bp) 2214 { 2215 struct sk_buff *skb; 2216 struct macb_dma_desc *desc; 2217 struct macb_queue *queue; 2218 dma_addr_t addr; 2219 unsigned int q; 2220 int i; 2221 2222 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2223 if (!queue->rx_skbuff) 2224 continue; 2225 2226 for (i = 0; i < bp->rx_ring_size; i++) { 2227 skb = queue->rx_skbuff[i]; 2228 2229 if (!skb) 2230 continue; 2231 2232 desc = macb_rx_desc(queue, i); 2233 addr = macb_get_addr(bp, desc); 2234 2235 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 2236 DMA_FROM_DEVICE); 2237 dev_kfree_skb_any(skb); 2238 skb = NULL; 2239 } 2240 2241 kfree(queue->rx_skbuff); 2242 queue->rx_skbuff = NULL; 2243 } 2244 } 2245 2246 static void macb_free_rx_buffers(struct macb *bp) 2247 { 2248 struct macb_queue *queue = &bp->queues[0]; 2249 2250 if (queue->rx_buffers) { 2251 dma_free_coherent(&bp->pdev->dev, 2252 bp->rx_ring_size * bp->rx_buffer_size, 2253 queue->rx_buffers, queue->rx_buffers_dma); 2254 queue->rx_buffers = NULL; 2255 } 2256 } 2257 2258 static void macb_free_consistent(struct macb *bp) 2259 { 2260 struct macb_queue *queue; 2261 unsigned int q; 2262 int size; 2263 2264 bp->macbgem_ops.mog_free_rx_buffers(bp); 2265 2266 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2267 kfree(queue->tx_skb); 2268 queue->tx_skb = NULL; 2269 if (queue->tx_ring) { 2270 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2271 dma_free_coherent(&bp->pdev->dev, size, 2272 queue->tx_ring, queue->tx_ring_dma); 2273 queue->tx_ring = NULL; 2274 } 2275 if (queue->rx_ring) { 2276 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2277 dma_free_coherent(&bp->pdev->dev, size, 2278 queue->rx_ring, queue->rx_ring_dma); 2279 queue->rx_ring = NULL; 2280 } 2281 } 2282 } 2283 2284 static int gem_alloc_rx_buffers(struct macb *bp) 2285 { 2286 struct macb_queue *queue; 2287 unsigned int q; 2288 int size; 2289 2290 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2291 size = bp->rx_ring_size * sizeof(struct sk_buff *); 2292 queue->rx_skbuff 
= kzalloc(size, GFP_KERNEL); 2293 if (!queue->rx_skbuff) 2294 return -ENOMEM; 2295 else 2296 netdev_dbg(bp->dev, 2297 "Allocated %d RX struct sk_buff entries at %p\n", 2298 bp->rx_ring_size, queue->rx_skbuff); 2299 } 2300 return 0; 2301 } 2302 2303 static int macb_alloc_rx_buffers(struct macb *bp) 2304 { 2305 struct macb_queue *queue = &bp->queues[0]; 2306 int size; 2307 2308 size = bp->rx_ring_size * bp->rx_buffer_size; 2309 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 2310 &queue->rx_buffers_dma, GFP_KERNEL); 2311 if (!queue->rx_buffers) 2312 return -ENOMEM; 2313 2314 netdev_dbg(bp->dev, 2315 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 2316 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); 2317 return 0; 2318 } 2319 2320 static int macb_alloc_consistent(struct macb *bp) 2321 { 2322 struct macb_queue *queue; 2323 unsigned int q; 2324 int size; 2325 2326 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2327 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; 2328 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2329 &queue->tx_ring_dma, 2330 GFP_KERNEL); 2331 if (!queue->tx_ring) 2332 goto out_err; 2333 netdev_dbg(bp->dev, 2334 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 2335 q, size, (unsigned long)queue->tx_ring_dma, 2336 queue->tx_ring); 2337 2338 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); 2339 queue->tx_skb = kmalloc(size, GFP_KERNEL); 2340 if (!queue->tx_skb) 2341 goto out_err; 2342 2343 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; 2344 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 2345 &queue->rx_ring_dma, GFP_KERNEL); 2346 if (!queue->rx_ring) 2347 goto out_err; 2348 netdev_dbg(bp->dev, 2349 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 2350 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); 2351 } 2352 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 2353 goto out_err; 2354 2355 return 0; 2356 2357 out_err: 2358 macb_free_consistent(bp); 2359 return -ENOMEM; 2360 } 2361 2362 static void gem_init_rings(struct macb *bp) 2363 { 2364 struct macb_queue *queue; 2365 struct macb_dma_desc *desc = NULL; 2366 unsigned int q; 2367 int i; 2368 2369 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2370 for (i = 0; i < bp->tx_ring_size; i++) { 2371 desc = macb_tx_desc(queue, i); 2372 macb_set_addr(bp, desc, 0); 2373 desc->ctrl = MACB_BIT(TX_USED); 2374 } 2375 desc->ctrl |= MACB_BIT(TX_WRAP); 2376 queue->tx_head = 0; 2377 queue->tx_tail = 0; 2378 2379 queue->rx_tail = 0; 2380 queue->rx_prepared_head = 0; 2381 2382 gem_rx_refill(queue); 2383 } 2384 2385 } 2386 2387 static void macb_init_rings(struct macb *bp) 2388 { 2389 int i; 2390 struct macb_dma_desc *desc = NULL; 2391 2392 macb_init_rx_ring(&bp->queues[0]); 2393 2394 for (i = 0; i < bp->tx_ring_size; i++) { 2395 desc = macb_tx_desc(&bp->queues[0], i); 2396 macb_set_addr(bp, desc, 0); 2397 desc->ctrl = MACB_BIT(TX_USED); 2398 } 2399 bp->queues[0].tx_head = 0; 2400 bp->queues[0].tx_tail = 0; 2401 desc->ctrl |= MACB_BIT(TX_WRAP); 2402 } 2403 2404 static void macb_reset_hw(struct macb *bp) 2405 { 2406 struct macb_queue *queue; 2407 unsigned int q; 2408 u32 ctrl = macb_readl(bp, NCR); 2409 2410 /* Disable RX and TX (XXX: Should we halt the transmission 2411 * more gracefully?) 2412 */ 2413 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); 2414 2415 /* Clear the stats registers (XXX: Update stats first?) 
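 * CLRSTAT is set in the same NCR value that has RE/TE cleared, so the
 * hardware statistics counters are zeroed by the single register write below.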
*/ 2416 ctrl |= MACB_BIT(CLRSTAT); 2417 2418 macb_writel(bp, NCR, ctrl); 2419 2420 /* Clear all status flags */ 2421 macb_writel(bp, TSR, -1); 2422 macb_writel(bp, RSR, -1); 2423 2424 /* Disable all interrupts */ 2425 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2426 queue_writel(queue, IDR, -1); 2427 queue_readl(queue, ISR); 2428 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 2429 queue_writel(queue, ISR, -1); 2430 } 2431 } 2432 2433 static u32 gem_mdc_clk_div(struct macb *bp) 2434 { 2435 u32 config; 2436 unsigned long pclk_hz = clk_get_rate(bp->pclk); 2437 2438 if (pclk_hz <= 20000000) 2439 config = GEM_BF(CLK, GEM_CLK_DIV8); 2440 else if (pclk_hz <= 40000000) 2441 config = GEM_BF(CLK, GEM_CLK_DIV16); 2442 else if (pclk_hz <= 80000000) 2443 config = GEM_BF(CLK, GEM_CLK_DIV32); 2444 else if (pclk_hz <= 120000000) 2445 config = GEM_BF(CLK, GEM_CLK_DIV48); 2446 else if (pclk_hz <= 160000000) 2447 config = GEM_BF(CLK, GEM_CLK_DIV64); 2448 else 2449 config = GEM_BF(CLK, GEM_CLK_DIV96); 2450 2451 return config; 2452 } 2453 2454 static u32 macb_mdc_clk_div(struct macb *bp) 2455 { 2456 u32 config; 2457 unsigned long pclk_hz; 2458 2459 if (macb_is_gem(bp)) 2460 return gem_mdc_clk_div(bp); 2461 2462 pclk_hz = clk_get_rate(bp->pclk); 2463 if (pclk_hz <= 20000000) 2464 config = MACB_BF(CLK, MACB_CLK_DIV8); 2465 else if (pclk_hz <= 40000000) 2466 config = MACB_BF(CLK, MACB_CLK_DIV16); 2467 else if (pclk_hz <= 80000000) 2468 config = MACB_BF(CLK, MACB_CLK_DIV32); 2469 else 2470 config = MACB_BF(CLK, MACB_CLK_DIV64); 2471 2472 return config; 2473 } 2474 2475 /* Get the DMA bus width field of the network configuration register that we 2476 * should program. We find the width from decoding the design configuration 2477 * register to find the maximum supported data bus width. 2478 */ 2479 static u32 macb_dbw(struct macb *bp) 2480 { 2481 if (!macb_is_gem(bp)) 2482 return 0; 2483 2484 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { 2485 case 4: 2486 return GEM_BF(DBW, GEM_DBW128); 2487 case 2: 2488 return GEM_BF(DBW, GEM_DBW64); 2489 case 1: 2490 default: 2491 return GEM_BF(DBW, GEM_DBW32); 2492 } 2493 } 2494 2495 /* Configure the receive DMA engine 2496 * - use the correct receive buffer size 2497 * - set best burst length for DMA operations 2498 * (if not supported by FIFO, it will fallback to default) 2499 * - set both rx/tx packet buffers to full memory size 2500 * These are configurable parameters for GEM. 
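 * A worked example: with the default MTU the RX buffer is rounded up to
 * 1536 bytes, so buffer_size below is 1536 / RX_BUFFER_MULTIPLE = 24, which
 * is written to the RXBS field for queue 0 and to RBQS for the other queues.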
2501 */ 2502 static void macb_configure_dma(struct macb *bp) 2503 { 2504 struct macb_queue *queue; 2505 u32 buffer_size; 2506 unsigned int q; 2507 u32 dmacfg; 2508 2509 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; 2510 if (macb_is_gem(bp)) { 2511 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 2512 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2513 if (q) 2514 queue_writel(queue, RBQS, buffer_size); 2515 else 2516 dmacfg |= GEM_BF(RXBS, buffer_size); 2517 } 2518 if (bp->dma_burst_length) 2519 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); 2520 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 2521 dmacfg &= ~GEM_BIT(ENDIA_PKT); 2522 2523 if (bp->native_io) 2524 dmacfg &= ~GEM_BIT(ENDIA_DESC); 2525 else 2526 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 2527 2528 if (bp->dev->features & NETIF_F_HW_CSUM) 2529 dmacfg |= GEM_BIT(TXCOEN); 2530 else 2531 dmacfg &= ~GEM_BIT(TXCOEN); 2532 2533 dmacfg &= ~GEM_BIT(ADDR64); 2534 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2535 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2536 dmacfg |= GEM_BIT(ADDR64); 2537 #endif 2538 #ifdef CONFIG_MACB_USE_HWSTAMP 2539 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2540 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2541 #endif 2542 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2543 dmacfg); 2544 gem_writel(bp, DMACFG, dmacfg); 2545 } 2546 } 2547 2548 static void macb_init_hw(struct macb *bp) 2549 { 2550 u32 config; 2551 2552 macb_reset_hw(bp); 2553 macb_set_hwaddr(bp); 2554 2555 config = macb_mdc_clk_div(bp); 2556 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ 2557 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 2558 if (bp->caps & MACB_CAPS_JUMBO) 2559 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ 2560 else 2561 config |= MACB_BIT(BIG); /* Receive oversized frames */ 2562 if (bp->dev->flags & IFF_PROMISC) 2563 config |= MACB_BIT(CAF); /* Copy All Frames */ 2564 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) 2565 config |= GEM_BIT(RXCOEN); 2566 if (!(bp->dev->flags & IFF_BROADCAST)) 2567 config |= MACB_BIT(NBC); /* No BroadCast */ 2568 config |= macb_dbw(bp); 2569 macb_writel(bp, NCFGR, config); 2570 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) 2571 gem_writel(bp, JML, bp->jumbo_max_len); 2572 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; 2573 if (bp->caps & MACB_CAPS_JUMBO) 2574 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; 2575 2576 macb_configure_dma(bp); 2577 } 2578 2579 /* The hash address register is 64 bits long and takes up two 2580 * locations in the memory map. The least significant bits are stored 2581 * in EMAC_HSL and the most significant bits in EMAC_HSH. 2582 * 2583 * The unicast hash enable and the multicast hash enable bits in the 2584 * network configuration register enable the reception of hash matched 2585 * frames. The destination address is reduced to a 6 bit index into 2586 * the 64 bit hash register using the following hash function. The 2587 * hash function is an exclusive or of every sixth bit of the 2588 * destination address. 
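 * (As a quick check of the formulas below: for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[] bit is 1, so each hi[n] is the exclusive or
 * of eight ones, i.e. 0, and broadcast frames hash to index 0.)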
2589 * 2590 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 2591 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 2592 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 2593 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 2594 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 2595 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 2596 * 2597 * da[0] represents the least significant bit of the first byte 2598 * received, that is, the multicast/unicast indicator, and da[47] 2599 * represents the most significant bit of the last byte received. If 2600 * the hash index, hi[n], points to a bit that is set in the hash 2601 * register then the frame will be matched according to whether the 2602 * frame is multicast or unicast. A multicast match will be signalled 2603 * if the multicast hash enable bit is set, da[0] is 1 and the hash 2604 * index points to a bit set in the hash register. A unicast match 2605 * will be signalled if the unicast hash enable bit is set, da[0] is 0 2606 * and the hash index points to a bit set in the hash register. To 2607 * receive all multicast frames, the hash register should be set with 2608 * all ones and the multicast hash enable bit should be set in the 2609 * network configuration register. 2610 */ 2611 2612 static inline int hash_bit_value(int bitnr, __u8 *addr) 2613 { 2614 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 2615 return 1; 2616 return 0; 2617 } 2618 2619 /* Return the hash index value for the specified address. */ 2620 static int hash_get_index(__u8 *addr) 2621 { 2622 int i, j, bitval; 2623 int hash_index = 0; 2624 2625 for (j = 0; j < 6; j++) { 2626 for (i = 0, bitval = 0; i < 8; i++) 2627 bitval ^= hash_bit_value(i * 6 + j, addr); 2628 2629 hash_index |= (bitval << j); 2630 } 2631 2632 return hash_index; 2633 } 2634 2635 /* Add multicast addresses to the internal multicast-hash table. */ 2636 static void macb_sethashtable(struct net_device *dev) 2637 { 2638 struct netdev_hw_addr *ha; 2639 unsigned long mc_filter[2]; 2640 unsigned int bitnr; 2641 struct macb *bp = netdev_priv(dev); 2642 2643 mc_filter[0] = 0; 2644 mc_filter[1] = 0; 2645 2646 netdev_for_each_mc_addr(ha, dev) { 2647 bitnr = hash_get_index(ha->addr); 2648 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 2649 } 2650 2651 macb_or_gem_writel(bp, HRB, mc_filter[0]); 2652 macb_or_gem_writel(bp, HRT, mc_filter[1]); 2653 } 2654 2655 /* Enable/Disable promiscuous and multicast modes. 
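 * Called via ndo_set_rx_mode; specific multicast addresses are folded into
 * the hash filter by macb_sethashtable() above, while IFF_ALLMULTI simply
 * sets both hash registers to all ones.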
*/ 2656 static void macb_set_rx_mode(struct net_device *dev) 2657 { 2658 unsigned long cfg; 2659 struct macb *bp = netdev_priv(dev); 2660 2661 cfg = macb_readl(bp, NCFGR); 2662 2663 if (dev->flags & IFF_PROMISC) { 2664 /* Enable promiscuous mode */ 2665 cfg |= MACB_BIT(CAF); 2666 2667 /* Disable RX checksum offload */ 2668 if (macb_is_gem(bp)) 2669 cfg &= ~GEM_BIT(RXCOEN); 2670 } else { 2671 /* Disable promiscuous mode */ 2672 cfg &= ~MACB_BIT(CAF); 2673 2674 /* Enable RX checksum offload only if requested */ 2675 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) 2676 cfg |= GEM_BIT(RXCOEN); 2677 } 2678 2679 if (dev->flags & IFF_ALLMULTI) { 2680 /* Enable all multicast mode */ 2681 macb_or_gem_writel(bp, HRB, -1); 2682 macb_or_gem_writel(bp, HRT, -1); 2683 cfg |= MACB_BIT(NCFGR_MTI); 2684 } else if (!netdev_mc_empty(dev)) { 2685 /* Enable specific multicasts */ 2686 macb_sethashtable(dev); 2687 cfg |= MACB_BIT(NCFGR_MTI); 2688 } else if (dev->flags & (~IFF_ALLMULTI)) { 2689 /* Disable all multicast mode */ 2690 macb_or_gem_writel(bp, HRB, 0); 2691 macb_or_gem_writel(bp, HRT, 0); 2692 cfg &= ~MACB_BIT(NCFGR_MTI); 2693 } 2694 2695 macb_writel(bp, NCFGR, cfg); 2696 } 2697 2698 static int macb_open(struct net_device *dev) 2699 { 2700 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; 2701 struct macb *bp = netdev_priv(dev); 2702 struct macb_queue *queue; 2703 unsigned int q; 2704 int err; 2705 2706 netdev_dbg(bp->dev, "open\n"); 2707 2708 err = pm_runtime_get_sync(&bp->pdev->dev); 2709 if (err < 0) 2710 goto pm_exit; 2711 2712 /* RX buffers initialization */ 2713 macb_init_rx_buffer_size(bp, bufsz); 2714 2715 err = macb_alloc_consistent(bp); 2716 if (err) { 2717 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 2718 err); 2719 goto pm_exit; 2720 } 2721 2722 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2723 napi_enable(&queue->napi); 2724 2725 macb_init_hw(bp); 2726 2727 err = macb_phylink_connect(bp); 2728 if (err) 2729 goto reset_hw; 2730 2731 netif_tx_start_all_queues(dev); 2732 2733 if (bp->ptp_info) 2734 bp->ptp_info->ptp_init(dev); 2735 2736 return 0; 2737 2738 reset_hw: 2739 macb_reset_hw(bp); 2740 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2741 napi_disable(&queue->napi); 2742 macb_free_consistent(bp); 2743 pm_exit: 2744 pm_runtime_put_sync(&bp->pdev->dev); 2745 return err; 2746 } 2747 2748 static int macb_close(struct net_device *dev) 2749 { 2750 struct macb *bp = netdev_priv(dev); 2751 struct macb_queue *queue; 2752 unsigned long flags; 2753 unsigned int q; 2754 2755 netif_tx_stop_all_queues(dev); 2756 2757 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2758 napi_disable(&queue->napi); 2759 2760 phylink_stop(bp->phylink); 2761 phylink_disconnect_phy(bp->phylink); 2762 2763 spin_lock_irqsave(&bp->lock, flags); 2764 macb_reset_hw(bp); 2765 netif_carrier_off(dev); 2766 spin_unlock_irqrestore(&bp->lock, flags); 2767 2768 macb_free_consistent(bp); 2769 2770 if (bp->ptp_info) 2771 bp->ptp_info->ptp_remove(dev); 2772 2773 pm_runtime_put(&bp->pdev->dev); 2774 2775 return 0; 2776 } 2777 2778 static int macb_change_mtu(struct net_device *dev, int new_mtu) 2779 { 2780 if (netif_running(dev)) 2781 return -EBUSY; 2782 2783 dev->mtu = new_mtu; 2784 2785 return 0; 2786 } 2787 2788 static void gem_update_stats(struct macb *bp) 2789 { 2790 struct macb_queue *queue; 2791 unsigned int i, q, idx; 2792 unsigned long *stat; 2793 2794 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 2795 2796 for (i = 0; i < 
GEM_STATS_LEN; ++i, ++p) { 2797 u32 offset = gem_statistics[i].offset; 2798 u64 val = bp->macb_reg_readl(bp, offset); 2799 2800 bp->ethtool_stats[i] += val; 2801 *p += val; 2802 2803 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 2804 /* Add GEM_OCTTXH, GEM_OCTRXH */ 2805 val = bp->macb_reg_readl(bp, offset + 4); 2806 bp->ethtool_stats[i] += ((u64)val) << 32; 2807 *(++p) += val; 2808 } 2809 } 2810 2811 idx = GEM_STATS_LEN; 2812 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 2813 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) 2814 bp->ethtool_stats[idx++] = *stat; 2815 } 2816 2817 static struct net_device_stats *gem_get_stats(struct macb *bp) 2818 { 2819 struct gem_stats *hwstat = &bp->hw_stats.gem; 2820 struct net_device_stats *nstat = &bp->dev->stats; 2821 2822 gem_update_stats(bp); 2823 2824 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + 2825 hwstat->rx_alignment_errors + 2826 hwstat->rx_resource_errors + 2827 hwstat->rx_overruns + 2828 hwstat->rx_oversize_frames + 2829 hwstat->rx_jabbers + 2830 hwstat->rx_undersized_frames + 2831 hwstat->rx_length_field_frame_errors); 2832 nstat->tx_errors = (hwstat->tx_late_collisions + 2833 hwstat->tx_excessive_collisions + 2834 hwstat->tx_underrun + 2835 hwstat->tx_carrier_sense_errors); 2836 nstat->multicast = hwstat->rx_multicast_frames; 2837 nstat->collisions = (hwstat->tx_single_collision_frames + 2838 hwstat->tx_multiple_collision_frames + 2839 hwstat->tx_excessive_collisions); 2840 nstat->rx_length_errors = (hwstat->rx_oversize_frames + 2841 hwstat->rx_jabbers + 2842 hwstat->rx_undersized_frames + 2843 hwstat->rx_length_field_frame_errors); 2844 nstat->rx_over_errors = hwstat->rx_resource_errors; 2845 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; 2846 nstat->rx_frame_errors = hwstat->rx_alignment_errors; 2847 nstat->rx_fifo_errors = hwstat->rx_overruns; 2848 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; 2849 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; 2850 nstat->tx_fifo_errors = hwstat->tx_underrun; 2851 2852 return nstat; 2853 } 2854 2855 static void gem_get_ethtool_stats(struct net_device *dev, 2856 struct ethtool_stats *stats, u64 *data) 2857 { 2858 struct macb *bp; 2859 2860 bp = netdev_priv(dev); 2861 gem_update_stats(bp); 2862 memcpy(data, &bp->ethtool_stats, sizeof(u64) 2863 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); 2864 } 2865 2866 static int gem_get_sset_count(struct net_device *dev, int sset) 2867 { 2868 struct macb *bp = netdev_priv(dev); 2869 2870 switch (sset) { 2871 case ETH_SS_STATS: 2872 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; 2873 default: 2874 return -EOPNOTSUPP; 2875 } 2876 } 2877 2878 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2879 { 2880 char stat_string[ETH_GSTRING_LEN]; 2881 struct macb *bp = netdev_priv(dev); 2882 struct macb_queue *queue; 2883 unsigned int i; 2884 unsigned int q; 2885 2886 switch (sset) { 2887 case ETH_SS_STATS: 2888 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) 2889 memcpy(p, gem_statistics[i].stat_string, 2890 ETH_GSTRING_LEN); 2891 2892 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 2893 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { 2894 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", 2895 q, queue_statistics[i].stat_string); 2896 memcpy(p, stat_string, ETH_GSTRING_LEN); 2897 } 2898 } 2899 break; 2900 } 2901 } 2902 2903 static struct net_device_stats *macb_get_stats(struct 
net_device *dev) 2904 { 2905 struct macb *bp = netdev_priv(dev); 2906 struct net_device_stats *nstat = &bp->dev->stats; 2907 struct macb_stats *hwstat = &bp->hw_stats.macb; 2908 2909 if (macb_is_gem(bp)) 2910 return gem_get_stats(bp); 2911 2912 /* read stats from hardware */ 2913 macb_update_stats(bp); 2914 2915 /* Convert HW stats into netdevice stats */ 2916 nstat->rx_errors = (hwstat->rx_fcs_errors + 2917 hwstat->rx_align_errors + 2918 hwstat->rx_resource_errors + 2919 hwstat->rx_overruns + 2920 hwstat->rx_oversize_pkts + 2921 hwstat->rx_jabbers + 2922 hwstat->rx_undersize_pkts + 2923 hwstat->rx_length_mismatch); 2924 nstat->tx_errors = (hwstat->tx_late_cols + 2925 hwstat->tx_excessive_cols + 2926 hwstat->tx_underruns + 2927 hwstat->tx_carrier_errors + 2928 hwstat->sqe_test_errors); 2929 nstat->collisions = (hwstat->tx_single_cols + 2930 hwstat->tx_multiple_cols + 2931 hwstat->tx_excessive_cols); 2932 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 2933 hwstat->rx_jabbers + 2934 hwstat->rx_undersize_pkts + 2935 hwstat->rx_length_mismatch); 2936 nstat->rx_over_errors = hwstat->rx_resource_errors + 2937 hwstat->rx_overruns; 2938 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 2939 nstat->rx_frame_errors = hwstat->rx_align_errors; 2940 nstat->rx_fifo_errors = hwstat->rx_overruns; 2941 /* XXX: What does "missed" mean? */ 2942 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 2943 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 2944 nstat->tx_fifo_errors = hwstat->tx_underruns; 2945 /* Don't know about heartbeat or window errors... */ 2946 2947 return nstat; 2948 } 2949 2950 static int macb_get_regs_len(struct net_device *netdev) 2951 { 2952 return MACB_GREGS_NBR * sizeof(u32); 2953 } 2954 2955 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2956 void *p) 2957 { 2958 struct macb *bp = netdev_priv(dev); 2959 unsigned int tail, head; 2960 u32 *regs_buff = p; 2961 2962 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 2963 | MACB_GREGS_VERSION; 2964 2965 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); 2966 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); 2967 2968 regs_buff[0] = macb_readl(bp, NCR); 2969 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); 2970 regs_buff[2] = macb_readl(bp, NSR); 2971 regs_buff[3] = macb_readl(bp, TSR); 2972 regs_buff[4] = macb_readl(bp, RBQP); 2973 regs_buff[5] = macb_readl(bp, TBQP); 2974 regs_buff[6] = macb_readl(bp, RSR); 2975 regs_buff[7] = macb_readl(bp, IMR); 2976 2977 regs_buff[8] = tail; 2978 regs_buff[9] = head; 2979 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 2980 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 2981 2982 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2983 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2984 if (macb_is_gem(bp)) 2985 regs_buff[13] = gem_readl(bp, DMACFG); 2986 } 2987 2988 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2989 { 2990 struct macb *bp = netdev_priv(netdev); 2991 2992 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { 2993 phylink_ethtool_get_wol(bp->phylink, wol); 2994 wol->supported |= WAKE_MAGIC; 2995 2996 if (bp->wol & MACB_WOL_ENABLED) 2997 wol->wolopts |= WAKE_MAGIC; 2998 } 2999 } 3000 3001 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 3002 { 3003 struct macb *bp = netdev_priv(netdev); 3004 int ret; 3005 3006 /* Pass the order to phylink layer */ 3007 ret = phylink_ethtool_set_wol(bp->phylink, wol); 3008 /* Don't manage WoL on MAC if handled by the PHY 3009 * or if there's a 
failure in talking to the PHY 3010 */ 3011 if (!ret || ret != -EOPNOTSUPP) 3012 return ret; 3013 3014 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || 3015 (wol->wolopts & ~WAKE_MAGIC)) 3016 return -EOPNOTSUPP; 3017 3018 if (wol->wolopts & WAKE_MAGIC) 3019 bp->wol |= MACB_WOL_ENABLED; 3020 else 3021 bp->wol &= ~MACB_WOL_ENABLED; 3022 3023 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); 3024 3025 return 0; 3026 } 3027 3028 static int macb_get_link_ksettings(struct net_device *netdev, 3029 struct ethtool_link_ksettings *kset) 3030 { 3031 struct macb *bp = netdev_priv(netdev); 3032 3033 return phylink_ethtool_ksettings_get(bp->phylink, kset); 3034 } 3035 3036 static int macb_set_link_ksettings(struct net_device *netdev, 3037 const struct ethtool_link_ksettings *kset) 3038 { 3039 struct macb *bp = netdev_priv(netdev); 3040 3041 return phylink_ethtool_ksettings_set(bp->phylink, kset); 3042 } 3043 3044 static void macb_get_ringparam(struct net_device *netdev, 3045 struct ethtool_ringparam *ring) 3046 { 3047 struct macb *bp = netdev_priv(netdev); 3048 3049 ring->rx_max_pending = MAX_RX_RING_SIZE; 3050 ring->tx_max_pending = MAX_TX_RING_SIZE; 3051 3052 ring->rx_pending = bp->rx_ring_size; 3053 ring->tx_pending = bp->tx_ring_size; 3054 } 3055 3056 static int macb_set_ringparam(struct net_device *netdev, 3057 struct ethtool_ringparam *ring) 3058 { 3059 struct macb *bp = netdev_priv(netdev); 3060 u32 new_rx_size, new_tx_size; 3061 unsigned int reset = 0; 3062 3063 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 3064 return -EINVAL; 3065 3066 new_rx_size = clamp_t(u32, ring->rx_pending, 3067 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); 3068 new_rx_size = roundup_pow_of_two(new_rx_size); 3069 3070 new_tx_size = clamp_t(u32, ring->tx_pending, 3071 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); 3072 new_tx_size = roundup_pow_of_two(new_tx_size); 3073 3074 if ((new_tx_size == bp->tx_ring_size) && 3075 (new_rx_size == bp->rx_ring_size)) { 3076 /* nothing to do */ 3077 return 0; 3078 } 3079 3080 if (netif_running(bp->dev)) { 3081 reset = 1; 3082 macb_close(bp->dev); 3083 } 3084 3085 bp->rx_ring_size = new_rx_size; 3086 bp->tx_ring_size = new_tx_size; 3087 3088 if (reset) 3089 macb_open(bp->dev); 3090 3091 return 0; 3092 } 3093 3094 #ifdef CONFIG_MACB_USE_HWSTAMP 3095 static unsigned int gem_get_tsu_rate(struct macb *bp) 3096 { 3097 struct clk *tsu_clk; 3098 unsigned int tsu_rate; 3099 3100 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); 3101 if (!IS_ERR(tsu_clk)) 3102 tsu_rate = clk_get_rate(tsu_clk); 3103 /* try pclk instead */ 3104 else if (!IS_ERR(bp->pclk)) { 3105 tsu_clk = bp->pclk; 3106 tsu_rate = clk_get_rate(tsu_clk); 3107 } else 3108 return -ENOTSUPP; 3109 return tsu_rate; 3110 } 3111 3112 static s32 gem_get_ptp_max_adj(void) 3113 { 3114 return 64000000; 3115 } 3116 3117 static int gem_get_ts_info(struct net_device *dev, 3118 struct ethtool_ts_info *info) 3119 { 3120 struct macb *bp = netdev_priv(dev); 3121 3122 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 3123 ethtool_op_get_ts_info(dev, info); 3124 return 0; 3125 } 3126 3127 info->so_timestamping = 3128 SOF_TIMESTAMPING_TX_SOFTWARE | 3129 SOF_TIMESTAMPING_RX_SOFTWARE | 3130 SOF_TIMESTAMPING_SOFTWARE | 3131 SOF_TIMESTAMPING_TX_HARDWARE | 3132 SOF_TIMESTAMPING_RX_HARDWARE | 3133 SOF_TIMESTAMPING_RAW_HARDWARE; 3134 info->tx_types = 3135 (1 << HWTSTAMP_TX_ONESTEP_SYNC) | 3136 (1 << HWTSTAMP_TX_OFF) | 3137 (1 << HWTSTAMP_TX_ON); 3138 info->rx_filters = 3139 (1 << HWTSTAMP_FILTER_NONE) | 3140 (1 << HWTSTAMP_FILTER_ALL); 3141 3142 
info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; 3143 3144 return 0; 3145 } 3146 3147 static struct macb_ptp_info gem_ptp_info = { 3148 .ptp_init = gem_ptp_init, 3149 .ptp_remove = gem_ptp_remove, 3150 .get_ptp_max_adj = gem_get_ptp_max_adj, 3151 .get_tsu_rate = gem_get_tsu_rate, 3152 .get_ts_info = gem_get_ts_info, 3153 .get_hwtst = gem_get_hwtst, 3154 .set_hwtst = gem_set_hwtst, 3155 }; 3156 #endif 3157 3158 static int macb_get_ts_info(struct net_device *netdev, 3159 struct ethtool_ts_info *info) 3160 { 3161 struct macb *bp = netdev_priv(netdev); 3162 3163 if (bp->ptp_info) 3164 return bp->ptp_info->get_ts_info(netdev, info); 3165 3166 return ethtool_op_get_ts_info(netdev, info); 3167 } 3168 3169 static void gem_enable_flow_filters(struct macb *bp, bool enable) 3170 { 3171 struct net_device *netdev = bp->dev; 3172 struct ethtool_rx_fs_item *item; 3173 u32 t2_scr; 3174 int num_t2_scr; 3175 3176 if (!(netdev->features & NETIF_F_NTUPLE)) 3177 return; 3178 3179 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); 3180 3181 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3182 struct ethtool_rx_flow_spec *fs = &item->fs; 3183 struct ethtool_tcpip4_spec *tp4sp_m; 3184 3185 if (fs->location >= num_t2_scr) 3186 continue; 3187 3188 t2_scr = gem_readl_n(bp, SCRT2, fs->location); 3189 3190 /* enable/disable screener regs for the flow entry */ 3191 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); 3192 3193 /* only enable fields with no masking */ 3194 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3195 3196 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) 3197 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); 3198 else 3199 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); 3200 3201 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) 3202 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); 3203 else 3204 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); 3205 3206 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) 3207 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); 3208 else 3209 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); 3210 3211 gem_writel_n(bp, SCRT2, fs->location, t2_scr); 3212 } 3213 } 3214 3215 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) 3216 { 3217 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; 3218 uint16_t index = fs->location; 3219 u32 w0, w1, t2_scr; 3220 bool cmp_a = false; 3221 bool cmp_b = false; 3222 bool cmp_c = false; 3223 3224 tp4sp_v = &(fs->h_u.tcp_ip4_spec); 3225 tp4sp_m = &(fs->m_u.tcp_ip4_spec); 3226 3227 /* ignore field if any masking set */ 3228 if (tp4sp_m->ip4src == 0xFFFFFFFF) { 3229 /* 1st compare reg - IP source address */ 3230 w0 = 0; 3231 w1 = 0; 3232 w0 = tp4sp_v->ip4src; 3233 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3234 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3235 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); 3236 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); 3237 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); 3238 cmp_a = true; 3239 } 3240 3241 /* ignore field if any masking set */ 3242 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { 3243 /* 2nd compare reg - IP destination address */ 3244 w0 = 0; 3245 w1 = 0; 3246 w0 = tp4sp_v->ip4dst; 3247 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3248 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); 3249 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); 3250 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); 3251 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); 3252 cmp_b = true; 3253 } 3254 3255 /* ignore both port fields 
if masking set in both */ 3256 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { 3257 /* 3rd compare reg - source port, destination port */ 3258 w0 = 0; 3259 w1 = 0; 3260 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); 3261 if (tp4sp_m->psrc == tp4sp_m->pdst) { 3262 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); 3263 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3264 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ 3265 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3266 } else { 3267 /* only one port definition */ 3268 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ 3269 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); 3270 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ 3271 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); 3272 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); 3273 } else { /* dst port */ 3274 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); 3275 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); 3276 } 3277 } 3278 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); 3279 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); 3280 cmp_c = true; 3281 } 3282 3283 t2_scr = 0; 3284 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); 3285 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); 3286 if (cmp_a) 3287 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); 3288 if (cmp_b) 3289 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); 3290 if (cmp_c) 3291 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); 3292 gem_writel_n(bp, SCRT2, index, t2_scr); 3293 } 3294 3295 static int gem_add_flow_filter(struct net_device *netdev, 3296 struct ethtool_rxnfc *cmd) 3297 { 3298 struct macb *bp = netdev_priv(netdev); 3299 struct ethtool_rx_flow_spec *fs = &cmd->fs; 3300 struct ethtool_rx_fs_item *item, *newfs; 3301 unsigned long flags; 3302 int ret = -EINVAL; 3303 bool added = false; 3304 3305 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); 3306 if (newfs == NULL) 3307 return -ENOMEM; 3308 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); 3309 3310 netdev_dbg(netdev, 3311 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3312 fs->flow_type, (int)fs->ring_cookie, fs->location, 3313 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3314 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3315 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); 3316 3317 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3318 3319 /* find correct place to add in list */ 3320 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3321 if (item->fs.location > newfs->fs.location) { 3322 list_add_tail(&newfs->list, &item->list); 3323 added = true; 3324 break; 3325 } else if (item->fs.location == fs->location) { 3326 netdev_err(netdev, "Rule not added: location %d not free!\n", 3327 fs->location); 3328 ret = -EBUSY; 3329 goto err; 3330 } 3331 } 3332 if (!added) 3333 list_add_tail(&newfs->list, &bp->rx_fs_list.list); 3334 3335 gem_prog_cmp_regs(bp, fs); 3336 bp->rx_fs_list.count++; 3337 /* enable filtering if NTUPLE on */ 3338 gem_enable_flow_filters(bp, 1); 3339 3340 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3341 return 0; 3342 3343 err: 3344 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3345 kfree(newfs); 3346 return ret; 3347 } 3348 3349 static int gem_del_flow_filter(struct net_device *netdev, 3350 struct ethtool_rxnfc *cmd) 3351 { 3352 struct macb *bp = netdev_priv(netdev); 3353 struct ethtool_rx_fs_item *item; 3354 struct ethtool_rx_flow_spec *fs; 3355 unsigned long flags; 3356 3357 spin_lock_irqsave(&bp->rx_fs_lock, flags); 3358 3359 list_for_each_entry(item, 
&bp->rx_fs_list.list, list) { 3360 if (item->fs.location == cmd->fs.location) { 3361 /* disable screener regs for the flow entry */ 3362 fs = &(item->fs); 3363 netdev_dbg(netdev, 3364 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", 3365 fs->flow_type, (int)fs->ring_cookie, fs->location, 3366 htonl(fs->h_u.tcp_ip4_spec.ip4src), 3367 htonl(fs->h_u.tcp_ip4_spec.ip4dst), 3368 htons(fs->h_u.tcp_ip4_spec.psrc), 3369 htons(fs->h_u.tcp_ip4_spec.pdst)); 3370 3371 gem_writel_n(bp, SCRT2, fs->location, 0); 3372 3373 list_del(&item->list); 3374 bp->rx_fs_list.count--; 3375 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3376 kfree(item); 3377 return 0; 3378 } 3379 } 3380 3381 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); 3382 return -EINVAL; 3383 } 3384 3385 static int gem_get_flow_entry(struct net_device *netdev, 3386 struct ethtool_rxnfc *cmd) 3387 { 3388 struct macb *bp = netdev_priv(netdev); 3389 struct ethtool_rx_fs_item *item; 3390 3391 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3392 if (item->fs.location == cmd->fs.location) { 3393 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); 3394 return 0; 3395 } 3396 } 3397 return -EINVAL; 3398 } 3399 3400 static int gem_get_all_flow_entries(struct net_device *netdev, 3401 struct ethtool_rxnfc *cmd, u32 *rule_locs) 3402 { 3403 struct macb *bp = netdev_priv(netdev); 3404 struct ethtool_rx_fs_item *item; 3405 uint32_t cnt = 0; 3406 3407 list_for_each_entry(item, &bp->rx_fs_list.list, list) { 3408 if (cnt == cmd->rule_cnt) 3409 return -EMSGSIZE; 3410 rule_locs[cnt] = item->fs.location; 3411 cnt++; 3412 } 3413 cmd->data = bp->max_tuples; 3414 cmd->rule_cnt = cnt; 3415 3416 return 0; 3417 } 3418 3419 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3420 u32 *rule_locs) 3421 { 3422 struct macb *bp = netdev_priv(netdev); 3423 int ret = 0; 3424 3425 switch (cmd->cmd) { 3426 case ETHTOOL_GRXRINGS: 3427 cmd->data = bp->num_queues; 3428 break; 3429 case ETHTOOL_GRXCLSRLCNT: 3430 cmd->rule_cnt = bp->rx_fs_list.count; 3431 break; 3432 case ETHTOOL_GRXCLSRULE: 3433 ret = gem_get_flow_entry(netdev, cmd); 3434 break; 3435 case ETHTOOL_GRXCLSRLALL: 3436 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); 3437 break; 3438 default: 3439 netdev_err(netdev, 3440 "Command parameter %d is not supported\n", cmd->cmd); 3441 ret = -EOPNOTSUPP; 3442 } 3443 3444 return ret; 3445 } 3446 3447 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3448 { 3449 struct macb *bp = netdev_priv(netdev); 3450 int ret; 3451 3452 switch (cmd->cmd) { 3453 case ETHTOOL_SRXCLSRLINS: 3454 if ((cmd->fs.location >= bp->max_tuples) 3455 || (cmd->fs.ring_cookie >= bp->num_queues)) { 3456 ret = -EINVAL; 3457 break; 3458 } 3459 ret = gem_add_flow_filter(netdev, cmd); 3460 break; 3461 case ETHTOOL_SRXCLSRLDEL: 3462 ret = gem_del_flow_filter(netdev, cmd); 3463 break; 3464 default: 3465 netdev_err(netdev, 3466 "Command parameter %d is not supported\n", cmd->cmd); 3467 ret = -EOPNOTSUPP; 3468 } 3469 3470 return ret; 3471 } 3472 3473 static const struct ethtool_ops macb_ethtool_ops = { 3474 .get_regs_len = macb_get_regs_len, 3475 .get_regs = macb_get_regs, 3476 .get_link = ethtool_op_get_link, 3477 .get_ts_info = ethtool_op_get_ts_info, 3478 .get_wol = macb_get_wol, 3479 .set_wol = macb_set_wol, 3480 .get_link_ksettings = macb_get_link_ksettings, 3481 .set_link_ksettings = macb_set_link_ksettings, 3482 .get_ringparam = macb_get_ringparam, 3483 .set_ringparam = macb_set_ringparam, 3484 }; 3485 3486 
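/* Illustrative sketch (not part of the driver): roughly how an ethtool
 * ntuple rule consumed by the gem_set_rxnfc()/gem_add_flow_filter() path
 * above could be populated. The helper name, addresses, ports and the
 * location/queue values are made up for this example (steering to queue 1
 * assumes a multi-queue GEM); the all-ones masks mark the fields that
 * gem_prog_cmp_regs() turns into type 2 screener/compare registers.
 */
#if 0	/* example only, never built */
static void gem_example_fill_ntuple_rule(struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = ETHTOOL_SRXCLSRLINS;
	fs->flow_type = TCP_V4_FLOW;
	fs->location = 0;				/* screener slot, < bp->max_tuples */
	fs->ring_cookie = 1;				/* RX queue to steer matches to */
	fs->h_u.tcp_ip4_spec.ip4src = htonl(0xc0a80001);	/* 192.168.0.1 */
	fs->m_u.tcp_ip4_spec.ip4src = 0xffffffff;	/* compare source IP */
	fs->h_u.tcp_ip4_spec.pdst = htons(5001);
	fs->m_u.tcp_ip4_spec.pdst = 0xffff;		/* compare destination port */
}
#endif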
static const struct ethtool_ops gem_ethtool_ops = { 3487 .get_regs_len = macb_get_regs_len, 3488 .get_regs = macb_get_regs, 3489 .get_wol = macb_get_wol, 3490 .set_wol = macb_set_wol, 3491 .get_link = ethtool_op_get_link, 3492 .get_ts_info = macb_get_ts_info, 3493 .get_ethtool_stats = gem_get_ethtool_stats, 3494 .get_strings = gem_get_ethtool_strings, 3495 .get_sset_count = gem_get_sset_count, 3496 .get_link_ksettings = macb_get_link_ksettings, 3497 .set_link_ksettings = macb_set_link_ksettings, 3498 .get_ringparam = macb_get_ringparam, 3499 .set_ringparam = macb_set_ringparam, 3500 .get_rxnfc = gem_get_rxnfc, 3501 .set_rxnfc = gem_set_rxnfc, 3502 }; 3503 3504 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3505 { 3506 struct macb *bp = netdev_priv(dev); 3507 3508 if (!netif_running(dev)) 3509 return -EINVAL; 3510 3511 if (bp->ptp_info) { 3512 switch (cmd) { 3513 case SIOCSHWTSTAMP: 3514 return bp->ptp_info->set_hwtst(dev, rq, cmd); 3515 case SIOCGHWTSTAMP: 3516 return bp->ptp_info->get_hwtst(dev, rq); 3517 } 3518 } 3519 3520 return phylink_mii_ioctl(bp->phylink, rq, cmd); 3521 } 3522 3523 static inline void macb_set_txcsum_feature(struct macb *bp, 3524 netdev_features_t features) 3525 { 3526 u32 val; 3527 3528 if (!macb_is_gem(bp)) 3529 return; 3530 3531 val = gem_readl(bp, DMACFG); 3532 if (features & NETIF_F_HW_CSUM) 3533 val |= GEM_BIT(TXCOEN); 3534 else 3535 val &= ~GEM_BIT(TXCOEN); 3536 3537 gem_writel(bp, DMACFG, val); 3538 } 3539 3540 static inline void macb_set_rxcsum_feature(struct macb *bp, 3541 netdev_features_t features) 3542 { 3543 struct net_device *netdev = bp->dev; 3544 u32 val; 3545 3546 if (!macb_is_gem(bp)) 3547 return; 3548 3549 val = gem_readl(bp, NCFGR); 3550 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) 3551 val |= GEM_BIT(RXCOEN); 3552 else 3553 val &= ~GEM_BIT(RXCOEN); 3554 3555 gem_writel(bp, NCFGR, val); 3556 } 3557 3558 static inline void macb_set_rxflow_feature(struct macb *bp, 3559 netdev_features_t features) 3560 { 3561 if (!macb_is_gem(bp)) 3562 return; 3563 3564 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); 3565 } 3566 3567 static int macb_set_features(struct net_device *netdev, 3568 netdev_features_t features) 3569 { 3570 struct macb *bp = netdev_priv(netdev); 3571 netdev_features_t changed = features ^ netdev->features; 3572 3573 /* TX checksum offload */ 3574 if (changed & NETIF_F_HW_CSUM) 3575 macb_set_txcsum_feature(bp, features); 3576 3577 /* RX checksum offload */ 3578 if (changed & NETIF_F_RXCSUM) 3579 macb_set_rxcsum_feature(bp, features); 3580 3581 /* RX Flow Filters */ 3582 if (changed & NETIF_F_NTUPLE) 3583 macb_set_rxflow_feature(bp, features); 3584 3585 return 0; 3586 } 3587 3588 static void macb_restore_features(struct macb *bp) 3589 { 3590 struct net_device *netdev = bp->dev; 3591 netdev_features_t features = netdev->features; 3592 3593 /* TX checksum offload */ 3594 macb_set_txcsum_feature(bp, features); 3595 3596 /* RX checksum offload */ 3597 macb_set_rxcsum_feature(bp, features); 3598 3599 /* RX Flow Filters */ 3600 macb_set_rxflow_feature(bp, features); 3601 } 3602 3603 static const struct net_device_ops macb_netdev_ops = { 3604 .ndo_open = macb_open, 3605 .ndo_stop = macb_close, 3606 .ndo_start_xmit = macb_start_xmit, 3607 .ndo_set_rx_mode = macb_set_rx_mode, 3608 .ndo_get_stats = macb_get_stats, 3609 .ndo_do_ioctl = macb_ioctl, 3610 .ndo_validate_addr = eth_validate_addr, 3611 .ndo_change_mtu = macb_change_mtu, 3612 .ndo_set_mac_address = eth_mac_addr, 3613 #ifdef 
CONFIG_NET_POLL_CONTROLLER 3614 .ndo_poll_controller = macb_poll_controller, 3615 #endif 3616 .ndo_set_features = macb_set_features, 3617 .ndo_features_check = macb_features_check, 3618 }; 3619 3620 /* Configure peripheral capabilities according to device tree 3621 * and integration options used 3622 */ 3623 static void macb_configure_caps(struct macb *bp, 3624 const struct macb_config *dt_conf) 3625 { 3626 u32 dcfg; 3627 3628 if (dt_conf) 3629 bp->caps = dt_conf->caps; 3630 3631 if (hw_is_gem(bp->regs, bp->native_io)) { 3632 bp->caps |= MACB_CAPS_MACB_IS_GEM; 3633 3634 dcfg = gem_readl(bp, DCFG1); 3635 if (GEM_BFEXT(IRQCOR, dcfg) == 0) 3636 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; 3637 if (GEM_BFEXT(NO_PCS, dcfg) == 0) 3638 bp->caps |= MACB_CAPS_PCS; 3639 dcfg = gem_readl(bp, DCFG12); 3640 if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1) 3641 bp->caps |= MACB_CAPS_HIGH_SPEED; 3642 dcfg = gem_readl(bp, DCFG2); 3643 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) 3644 bp->caps |= MACB_CAPS_FIFO_MODE; 3645 #ifdef CONFIG_MACB_USE_HWSTAMP 3646 if (gem_has_ptp(bp)) { 3647 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) 3648 dev_err(&bp->pdev->dev, 3649 "GEM doesn't support hardware ptp.\n"); 3650 else { 3651 bp->hw_dma_cap |= HW_DMA_CAP_PTP; 3652 bp->ptp_info = &gem_ptp_info; 3653 } 3654 } 3655 #endif 3656 } 3657 3658 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 3659 } 3660 3661 static void macb_probe_queues(void __iomem *mem, 3662 bool native_io, 3663 unsigned int *queue_mask, 3664 unsigned int *num_queues) 3665 { 3666 *queue_mask = 0x1; 3667 *num_queues = 1; 3668 3669 /* is it macb or gem ? 3670 * 3671 * We need to read directly from the hardware here because 3672 * we are early in the probe process and don't have the 3673 * MACB_CAPS_MACB_IS_GEM flag positioned 3674 */ 3675 if (!hw_is_gem(mem, native_io)) 3676 return; 3677 3678 /* bit 0 is never set but queue 0 always exists */ 3679 *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; 3680 *num_queues = hweight32(*queue_mask); 3681 } 3682 3683 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, 3684 struct clk **hclk, struct clk **tx_clk, 3685 struct clk **rx_clk, struct clk **tsu_clk) 3686 { 3687 struct macb_platform_data *pdata; 3688 int err; 3689 3690 pdata = dev_get_platdata(&pdev->dev); 3691 if (pdata) { 3692 *pclk = pdata->pclk; 3693 *hclk = pdata->hclk; 3694 } else { 3695 *pclk = devm_clk_get(&pdev->dev, "pclk"); 3696 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3697 } 3698 3699 if (IS_ERR_OR_NULL(*pclk)) { 3700 err = PTR_ERR(*pclk); 3701 if (!err) 3702 err = -ENODEV; 3703 3704 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3705 return err; 3706 } 3707 3708 if (IS_ERR_OR_NULL(*hclk)) { 3709 err = PTR_ERR(*hclk); 3710 if (!err) 3711 err = -ENODEV; 3712 3713 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3714 return err; 3715 } 3716 3717 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); 3718 if (IS_ERR(*tx_clk)) 3719 return PTR_ERR(*tx_clk); 3720 3721 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); 3722 if (IS_ERR(*rx_clk)) 3723 return PTR_ERR(*rx_clk); 3724 3725 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); 3726 if (IS_ERR(*tsu_clk)) 3727 return PTR_ERR(*tsu_clk); 3728 3729 err = clk_prepare_enable(*pclk); 3730 if (err) { 3731 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3732 return err; 3733 } 3734 3735 err = clk_prepare_enable(*hclk); 3736 if (err) { 3737 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3738 goto err_disable_pclk; 
3739 } 3740 3741 err = clk_prepare_enable(*tx_clk); 3742 if (err) { 3743 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3744 goto err_disable_hclk; 3745 } 3746 3747 err = clk_prepare_enable(*rx_clk); 3748 if (err) { 3749 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3750 goto err_disable_txclk; 3751 } 3752 3753 err = clk_prepare_enable(*tsu_clk); 3754 if (err) { 3755 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3756 goto err_disable_rxclk; 3757 } 3758 3759 return 0; 3760 3761 err_disable_rxclk: 3762 clk_disable_unprepare(*rx_clk); 3763 3764 err_disable_txclk: 3765 clk_disable_unprepare(*tx_clk); 3766 3767 err_disable_hclk: 3768 clk_disable_unprepare(*hclk); 3769 3770 err_disable_pclk: 3771 clk_disable_unprepare(*pclk); 3772 3773 return err; 3774 } 3775 3776 static int macb_init(struct platform_device *pdev) 3777 { 3778 struct net_device *dev = platform_get_drvdata(pdev); 3779 unsigned int hw_q, q; 3780 struct macb *bp = netdev_priv(dev); 3781 struct macb_queue *queue; 3782 int err; 3783 u32 val, reg; 3784 3785 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; 3786 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; 3787 3788 /* set the queue register mapping once for all: queue0 has a special 3789 * register mapping but we don't want to test the queue index then 3790 * compute the corresponding register offset at run time. 3791 */ 3792 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 3793 if (!(bp->queue_mask & (1 << hw_q))) 3794 continue; 3795 3796 queue = &bp->queues[q]; 3797 queue->bp = bp; 3798 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); 3799 if (hw_q) { 3800 queue->ISR = GEM_ISR(hw_q - 1); 3801 queue->IER = GEM_IER(hw_q - 1); 3802 queue->IDR = GEM_IDR(hw_q - 1); 3803 queue->IMR = GEM_IMR(hw_q - 1); 3804 queue->TBQP = GEM_TBQP(hw_q - 1); 3805 queue->RBQP = GEM_RBQP(hw_q - 1); 3806 queue->RBQS = GEM_RBQS(hw_q - 1); 3807 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3808 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3809 queue->TBQPH = GEM_TBQPH(hw_q - 1); 3810 queue->RBQPH = GEM_RBQPH(hw_q - 1); 3811 } 3812 #endif 3813 } else { 3814 /* queue0 uses legacy registers */ 3815 queue->ISR = MACB_ISR; 3816 queue->IER = MACB_IER; 3817 queue->IDR = MACB_IDR; 3818 queue->IMR = MACB_IMR; 3819 queue->TBQP = MACB_TBQP; 3820 queue->RBQP = MACB_RBQP; 3821 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3822 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 3823 queue->TBQPH = MACB_TBQPH; 3824 queue->RBQPH = MACB_RBQPH; 3825 } 3826 #endif 3827 } 3828 3829 /* get irq: here we use the linux queue index, not the hardware 3830 * queue index. the queue irq definitions in the device tree 3831 * must remove the optional gaps that could exist in the 3832 * hardware queue mask. 
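 * e.g. if the hardware advertises queues 0, 1 and 3 (queue_mask 0x0b), they
 * become linux queues 0, 1 and 2, and the device tree must list exactly
 * three interrupts in that order.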
3833 */ 3834 queue->irq = platform_get_irq(pdev, q); 3835 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 3836 IRQF_SHARED, dev->name, queue); 3837 if (err) { 3838 dev_err(&pdev->dev, 3839 "Unable to request IRQ %d (error %d)\n", 3840 queue->irq, err); 3841 return err; 3842 } 3843 3844 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 3845 q++; 3846 } 3847 3848 dev->netdev_ops = &macb_netdev_ops; 3849 3850 /* setup appropriated routines according to adapter type */ 3851 if (macb_is_gem(bp)) { 3852 bp->max_tx_length = GEM_MAX_TX_LEN; 3853 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; 3854 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 3855 bp->macbgem_ops.mog_init_rings = gem_init_rings; 3856 bp->macbgem_ops.mog_rx = gem_rx; 3857 dev->ethtool_ops = &gem_ethtool_ops; 3858 } else { 3859 bp->max_tx_length = MACB_MAX_TX_LEN; 3860 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 3861 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 3862 bp->macbgem_ops.mog_init_rings = macb_init_rings; 3863 bp->macbgem_ops.mog_rx = macb_rx; 3864 dev->ethtool_ops = &macb_ethtool_ops; 3865 } 3866 3867 /* Set features */ 3868 dev->hw_features = NETIF_F_SG; 3869 3870 /* Check LSO capability */ 3871 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) 3872 dev->hw_features |= MACB_NETIF_LSO; 3873 3874 /* Checksum offload is only available on gem with packet buffer */ 3875 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) 3876 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 3877 if (bp->caps & MACB_CAPS_SG_DISABLED) 3878 dev->hw_features &= ~NETIF_F_SG; 3879 dev->features = dev->hw_features; 3880 3881 /* Check RX Flow Filters support. 3882 * Max Rx flows set by availability of screeners & compare regs: 3883 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 3884 */ 3885 reg = gem_readl(bp, DCFG8); 3886 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 3887 GEM_BFEXT(T2SCR, reg)); 3888 if (bp->max_tuples > 0) { 3889 /* also needs one ethtype match to check IPv4 */ 3890 if (GEM_BFEXT(SCR2ETH, reg) > 0) { 3891 /* program this reg now */ 3892 reg = 0; 3893 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); 3894 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); 3895 /* Filtering is supported in hw but don't enable it in kernel now */ 3896 dev->hw_features |= NETIF_F_NTUPLE; 3897 /* init Rx flow definitions */ 3898 INIT_LIST_HEAD(&bp->rx_fs_list.list); 3899 bp->rx_fs_list.count = 0; 3900 spin_lock_init(&bp->rx_fs_lock); 3901 } else 3902 bp->max_tuples = 0; 3903 } 3904 3905 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { 3906 val = 0; 3907 if (phy_interface_mode_is_rgmii(bp->phy_interface)) 3908 val = GEM_BIT(RGMII); 3909 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && 3910 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3911 val = MACB_BIT(RMII); 3912 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) 3913 val = MACB_BIT(MII); 3914 3915 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) 3916 val |= MACB_BIT(CLKEN); 3917 3918 macb_or_gem_writel(bp, USRIO, val); 3919 } 3920 3921 /* Set MII management clock divider */ 3922 val = macb_mdc_clk_div(bp); 3923 val |= macb_dbw(bp); 3924 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) 3925 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); 3926 macb_writel(bp, NCFGR, val); 3927 3928 return 0; 3929 } 3930 3931 #if defined(CONFIG_OF) 3932 /* 1518 rounded up */ 3933 #define AT91ETHER_MAX_RBUFF_SZ 0x600 3934 /* max number of receive buffers */ 3935 #define AT91ETHER_MAX_RX_DESCR 9 3936 3937 static 
struct sifive_fu540_macb_mgmt *mgmt; 3938 3939 static int at91ether_alloc_coherent(struct macb *lp) 3940 { 3941 struct macb_queue *q = &lp->queues[0]; 3942 3943 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 3944 (AT91ETHER_MAX_RX_DESCR * 3945 macb_dma_desc_get_size(lp)), 3946 &q->rx_ring_dma, GFP_KERNEL); 3947 if (!q->rx_ring) 3948 return -ENOMEM; 3949 3950 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 3951 AT91ETHER_MAX_RX_DESCR * 3952 AT91ETHER_MAX_RBUFF_SZ, 3953 &q->rx_buffers_dma, GFP_KERNEL); 3954 if (!q->rx_buffers) { 3955 dma_free_coherent(&lp->pdev->dev, 3956 AT91ETHER_MAX_RX_DESCR * 3957 macb_dma_desc_get_size(lp), 3958 q->rx_ring, q->rx_ring_dma); 3959 q->rx_ring = NULL; 3960 return -ENOMEM; 3961 } 3962 3963 return 0; 3964 } 3965 3966 static void at91ether_free_coherent(struct macb *lp) 3967 { 3968 struct macb_queue *q = &lp->queues[0]; 3969 3970 if (q->rx_ring) { 3971 dma_free_coherent(&lp->pdev->dev, 3972 AT91ETHER_MAX_RX_DESCR * 3973 macb_dma_desc_get_size(lp), 3974 q->rx_ring, q->rx_ring_dma); 3975 q->rx_ring = NULL; 3976 } 3977 3978 if (q->rx_buffers) { 3979 dma_free_coherent(&lp->pdev->dev, 3980 AT91ETHER_MAX_RX_DESCR * 3981 AT91ETHER_MAX_RBUFF_SZ, 3982 q->rx_buffers, q->rx_buffers_dma); 3983 q->rx_buffers = NULL; 3984 } 3985 } 3986 3987 /* Initialize and start the Receiver and Transmit subsystems */ 3988 static int at91ether_start(struct macb *lp) 3989 { 3990 struct macb_queue *q = &lp->queues[0]; 3991 struct macb_dma_desc *desc; 3992 dma_addr_t addr; 3993 u32 ctl; 3994 int i, ret; 3995 3996 ret = at91ether_alloc_coherent(lp); 3997 if (ret) 3998 return ret; 3999 4000 addr = q->rx_buffers_dma; 4001 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 4002 desc = macb_rx_desc(q, i); 4003 macb_set_addr(lp, desc, addr); 4004 desc->ctrl = 0; 4005 addr += AT91ETHER_MAX_RBUFF_SZ; 4006 } 4007 4008 /* Set the Wrap bit on the last descriptor */ 4009 desc->addr |= MACB_BIT(RX_WRAP); 4010 4011 /* Reset buffer index */ 4012 q->rx_tail = 0; 4013 4014 /* Program address of descriptor list in Rx Buffer Queue register */ 4015 macb_writel(lp, RBQP, q->rx_ring_dma); 4016 4017 /* Enable Receive and Transmit */ 4018 ctl = macb_readl(lp, NCR); 4019 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); 4020 4021 /* Enable MAC interrupts */ 4022 macb_writel(lp, IER, MACB_BIT(RCOMP) | 4023 MACB_BIT(RXUBR) | 4024 MACB_BIT(ISR_TUND) | 4025 MACB_BIT(ISR_RLE) | 4026 MACB_BIT(TCOMP) | 4027 MACB_BIT(RM9200_TBRE) | 4028 MACB_BIT(ISR_ROVR) | 4029 MACB_BIT(HRESP)); 4030 4031 return 0; 4032 } 4033 4034 static void at91ether_stop(struct macb *lp) 4035 { 4036 u32 ctl; 4037 4038 /* Disable MAC interrupts */ 4039 macb_writel(lp, IDR, MACB_BIT(RCOMP) | 4040 MACB_BIT(RXUBR) | 4041 MACB_BIT(ISR_TUND) | 4042 MACB_BIT(ISR_RLE) | 4043 MACB_BIT(TCOMP) | 4044 MACB_BIT(RM9200_TBRE) | 4045 MACB_BIT(ISR_ROVR) | 4046 MACB_BIT(HRESP)); 4047 4048 /* Disable Receiver and Transmitter */ 4049 ctl = macb_readl(lp, NCR); 4050 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); 4051 4052 /* Free resources. 
*/ 4053 at91ether_free_coherent(lp); 4054 } 4055 4056 /* Open the ethernet interface */ 4057 static int at91ether_open(struct net_device *dev) 4058 { 4059 struct macb *lp = netdev_priv(dev); 4060 u32 ctl; 4061 int ret; 4062 4063 ret = pm_runtime_get_sync(&lp->pdev->dev); 4064 if (ret < 0) { 4065 pm_runtime_put_noidle(&lp->pdev->dev); 4066 return ret; 4067 } 4068 4069 /* Clear internal statistics */ 4070 ctl = macb_readl(lp, NCR); 4071 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); 4072 4073 macb_set_hwaddr(lp); 4074 4075 ret = at91ether_start(lp); 4076 if (ret) 4077 goto pm_exit; 4078 4079 ret = macb_phylink_connect(lp); 4080 if (ret) 4081 goto stop; 4082 4083 netif_start_queue(dev); 4084 4085 return 0; 4086 4087 stop: 4088 at91ether_stop(lp); 4089 pm_exit: 4090 pm_runtime_put_sync(&lp->pdev->dev); 4091 return ret; 4092 } 4093 4094 /* Close the interface */ 4095 static int at91ether_close(struct net_device *dev) 4096 { 4097 struct macb *lp = netdev_priv(dev); 4098 4099 netif_stop_queue(dev); 4100 4101 phylink_stop(lp->phylink); 4102 phylink_disconnect_phy(lp->phylink); 4103 4104 at91ether_stop(lp); 4105 4106 return pm_runtime_put(&lp->pdev->dev); 4107 } 4108 4109 /* Transmit packet */ 4110 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, 4111 struct net_device *dev) 4112 { 4113 struct macb *lp = netdev_priv(dev); 4114 unsigned long flags; 4115 4116 if (lp->rm9200_tx_len < 2) { 4117 int desc = lp->rm9200_tx_tail; 4118 4119 /* Store packet information (to free when Tx completed) */ 4120 lp->rm9200_txq[desc].skb = skb; 4121 lp->rm9200_txq[desc].size = skb->len; 4122 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, 4123 skb->len, DMA_TO_DEVICE); 4124 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { 4125 dev_kfree_skb_any(skb); 4126 dev->stats.tx_dropped++; 4127 netdev_err(dev, "%s: DMA mapping error\n", __func__); 4128 return NETDEV_TX_OK; 4129 } 4130 4131 spin_lock_irqsave(&lp->lock, flags); 4132 4133 lp->rm9200_tx_tail = (desc + 1) & 1; 4134 lp->rm9200_tx_len++; 4135 if (lp->rm9200_tx_len > 1) 4136 netif_stop_queue(dev); 4137 4138 spin_unlock_irqrestore(&lp->lock, flags); 4139 4140 /* Set address of the data in the Transmit Address register */ 4141 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); 4142 /* Set length of the packet in the Transmit Control register */ 4143 macb_writel(lp, TCR, skb->len); 4144 4145 } else { 4146 netdev_err(dev, "%s called, but device is busy!\n", __func__); 4147 return NETDEV_TX_BUSY; 4148 } 4149 4150 return NETDEV_TX_OK; 4151 } 4152 4153 /* Extract received frame from buffer descriptors and sent to upper layers. 
4154 * (Called from interrupt context) 4155 */ 4156 static void at91ether_rx(struct net_device *dev) 4157 { 4158 struct macb *lp = netdev_priv(dev); 4159 struct macb_queue *q = &lp->queues[0]; 4160 struct macb_dma_desc *desc; 4161 unsigned char *p_recv; 4162 struct sk_buff *skb; 4163 unsigned int pktlen; 4164 4165 desc = macb_rx_desc(q, q->rx_tail); 4166 while (desc->addr & MACB_BIT(RX_USED)) { 4167 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 4168 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); 4169 skb = netdev_alloc_skb(dev, pktlen + 2); 4170 if (skb) { 4171 skb_reserve(skb, 2); 4172 skb_put_data(skb, p_recv, pktlen); 4173 4174 skb->protocol = eth_type_trans(skb, dev); 4175 dev->stats.rx_packets++; 4176 dev->stats.rx_bytes += pktlen; 4177 netif_rx(skb); 4178 } else { 4179 dev->stats.rx_dropped++; 4180 } 4181 4182 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) 4183 dev->stats.multicast++; 4184 4185 /* reset ownership bit */ 4186 desc->addr &= ~MACB_BIT(RX_USED); 4187 4188 /* wrap after last buffer */ 4189 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 4190 q->rx_tail = 0; 4191 else 4192 q->rx_tail++; 4193 4194 desc = macb_rx_desc(q, q->rx_tail); 4195 } 4196 } 4197 4198 /* MAC interrupt handler */ 4199 static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 4200 { 4201 struct net_device *dev = dev_id; 4202 struct macb *lp = netdev_priv(dev); 4203 u32 intstatus, ctl; 4204 unsigned int desc; 4205 unsigned int qlen; 4206 u32 tsr; 4207 4208 /* MAC Interrupt Status register indicates what interrupts are pending. 4209 * It is automatically cleared once read. 4210 */ 4211 intstatus = macb_readl(lp, ISR); 4212 4213 /* Receive complete */ 4214 if (intstatus & MACB_BIT(RCOMP)) 4215 at91ether_rx(dev); 4216 4217 /* Transmit complete */ 4218 if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) { 4219 /* The TCOM bit is set even if the transmission failed */ 4220 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) 4221 dev->stats.tx_errors++; 4222 4223 spin_lock(&lp->lock); 4224 4225 tsr = macb_readl(lp, TSR); 4226 4227 /* we have three possibilities here: 4228 * - all pending packets transmitted (TGO, implies BNQ) 4229 * - only first packet transmitted (!TGO && BNQ) 4230 * - two frames pending (!TGO && !BNQ) 4231 * Note that TGO ("transmit go") is called "IDLE" on RM9200. 4232 */ 4233 qlen = (tsr & MACB_BIT(TGO)) ? 0 : 4234 (tsr & MACB_BIT(RM9200_BNQ)) ? 
1 : 2; 4235 4236 while (lp->rm9200_tx_len > qlen) { 4237 desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1; 4238 dev_consume_skb_irq(lp->rm9200_txq[desc].skb); 4239 lp->rm9200_txq[desc].skb = NULL; 4240 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, 4241 lp->rm9200_txq[desc].size, DMA_TO_DEVICE); 4242 dev->stats.tx_packets++; 4243 dev->stats.tx_bytes += lp->rm9200_txq[desc].size; 4244 lp->rm9200_tx_len--; 4245 } 4246 4247 if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev)) 4248 netif_wake_queue(dev); 4249 4250 spin_unlock(&lp->lock); 4251 } 4252 4253 /* Work-around for EMAC Errata section 41.3.1 */ 4254 if (intstatus & MACB_BIT(RXUBR)) { 4255 ctl = macb_readl(lp, NCR); 4256 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 4257 wmb(); 4258 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 4259 } 4260 4261 if (intstatus & MACB_BIT(ISR_ROVR)) 4262 netdev_err(dev, "ROVR error\n"); 4263 4264 return IRQ_HANDLED; 4265 } 4266 4267 #ifdef CONFIG_NET_POLL_CONTROLLER 4268 static void at91ether_poll_controller(struct net_device *dev) 4269 { 4270 unsigned long flags; 4271 4272 local_irq_save(flags); 4273 at91ether_interrupt(dev->irq, dev); 4274 local_irq_restore(flags); 4275 } 4276 #endif 4277 4278 static const struct net_device_ops at91ether_netdev_ops = { 4279 .ndo_open = at91ether_open, 4280 .ndo_stop = at91ether_close, 4281 .ndo_start_xmit = at91ether_start_xmit, 4282 .ndo_get_stats = macb_get_stats, 4283 .ndo_set_rx_mode = macb_set_rx_mode, 4284 .ndo_set_mac_address = eth_mac_addr, 4285 .ndo_do_ioctl = macb_ioctl, 4286 .ndo_validate_addr = eth_validate_addr, 4287 #ifdef CONFIG_NET_POLL_CONTROLLER 4288 .ndo_poll_controller = at91ether_poll_controller, 4289 #endif 4290 }; 4291 4292 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, 4293 struct clk **hclk, struct clk **tx_clk, 4294 struct clk **rx_clk, struct clk **tsu_clk) 4295 { 4296 int err; 4297 4298 *hclk = NULL; 4299 *tx_clk = NULL; 4300 *rx_clk = NULL; 4301 *tsu_clk = NULL; 4302 4303 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); 4304 if (IS_ERR(*pclk)) 4305 return PTR_ERR(*pclk); 4306 4307 err = clk_prepare_enable(*pclk); 4308 if (err) { 4309 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 4310 return err; 4311 } 4312 4313 return 0; 4314 } 4315 4316 static int at91ether_init(struct platform_device *pdev) 4317 { 4318 struct net_device *dev = platform_get_drvdata(pdev); 4319 struct macb *bp = netdev_priv(dev); 4320 int err; 4321 4322 bp->queues[0].bp = bp; 4323 4324 dev->netdev_ops = &at91ether_netdev_ops; 4325 dev->ethtool_ops = &macb_ethtool_ops; 4326 4327 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 4328 0, dev->name, dev); 4329 if (err) 4330 return err; 4331 4332 macb_writel(bp, NCR, 0); 4333 4334 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); 4335 4336 return 0; 4337 } 4338 4339 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, 4340 unsigned long parent_rate) 4341 { 4342 return mgmt->rate; 4343 } 4344 4345 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, 4346 unsigned long *parent_rate) 4347 { 4348 if (WARN_ON(rate < 2500000)) 4349 return 2500000; 4350 else if (rate == 2500000) 4351 return 2500000; 4352 else if (WARN_ON(rate < 13750000)) 4353 return 2500000; 4354 else if (WARN_ON(rate < 25000000)) 4355 return 25000000; 4356 else if (rate == 25000000) 4357 return 25000000; 4358 else if (WARN_ON(rate < 75000000)) 4359 return 25000000; 4360 else if (WARN_ON(rate < 125000000)) 4361 return 125000000; 4362 else if 
(rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
	else
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

	return 0;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);

	return macb_init(pdev);
}

static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

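/* Each macb_config above bundles the per-SoC parameters that macb_probe()
 * consumes: the capability flags copied into bp->caps, the DMA burst length,
 * the clock-init and IP-init hooks, and an optional jumbo frame limit. An
 * entry is selected through the matching compatible string in macb_dt_ids[]
 * further below; when no match carries data, default_gem_config is used.
 *
 * As a purely hypothetical sketch (the SoC and compatible string below do
 * not exist), a new platform that can reuse the generic helpers would only
 * need something like:
 *
 *	static const struct macb_config example_soc_config = {
 *		.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 *		.dma_burst_length = 16,
 *		.clk_init = macb_clk_init,
 *		.init = macb_init,
 *	};
 *
 * plus a { .compatible = "vendor,example-gem", .data = &example_soc_config }
 * entry in macb_dt_ids[].
 */
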
static const struct macb_config emac_config = { 4487 .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, 4488 .clk_init = at91ether_clk_init, 4489 .init = at91ether_init, 4490 }; 4491 4492 static const struct macb_config np4_config = { 4493 .caps = MACB_CAPS_USRIO_DISABLED, 4494 .clk_init = macb_clk_init, 4495 .init = macb_init, 4496 }; 4497 4498 static const struct macb_config zynqmp_config = { 4499 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 4500 MACB_CAPS_JUMBO | 4501 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, 4502 .dma_burst_length = 16, 4503 .clk_init = macb_clk_init, 4504 .init = macb_init, 4505 .jumbo_max_len = 10240, 4506 }; 4507 4508 static const struct macb_config zynq_config = { 4509 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | 4510 MACB_CAPS_NEEDS_RSTONUBR, 4511 .dma_burst_length = 16, 4512 .clk_init = macb_clk_init, 4513 .init = macb_init, 4514 }; 4515 4516 static const struct of_device_id macb_dt_ids[] = { 4517 { .compatible = "cdns,at32ap7000-macb" }, 4518 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 4519 { .compatible = "cdns,macb" }, 4520 { .compatible = "cdns,np4-macb", .data = &np4_config }, 4521 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, 4522 { .compatible = "cdns,gem", .data = &pc302gem_config }, 4523 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, 4524 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 4525 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 4526 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, 4527 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 4528 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 4529 { .compatible = "cdns,emac", .data = &emac_config }, 4530 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 4531 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 4532 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, 4533 { /* sentinel */ } 4534 }; 4535 MODULE_DEVICE_TABLE(of, macb_dt_ids); 4536 #endif /* CONFIG_OF */ 4537 4538 static const struct macb_config default_gem_config = { 4539 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 4540 MACB_CAPS_JUMBO | 4541 MACB_CAPS_GEM_HAS_PTP, 4542 .dma_burst_length = 16, 4543 .clk_init = macb_clk_init, 4544 .init = macb_init, 4545 .jumbo_max_len = 10240, 4546 }; 4547 4548 static int macb_probe(struct platform_device *pdev) 4549 { 4550 const struct macb_config *macb_config = &default_gem_config; 4551 int (*clk_init)(struct platform_device *, struct clk **, 4552 struct clk **, struct clk **, struct clk **, 4553 struct clk **) = macb_config->clk_init; 4554 int (*init)(struct platform_device *) = macb_config->init; 4555 struct device_node *np = pdev->dev.of_node; 4556 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 4557 struct clk *tsu_clk = NULL; 4558 unsigned int queue_mask, num_queues; 4559 bool native_io; 4560 phy_interface_t interface; 4561 struct net_device *dev; 4562 struct resource *regs; 4563 void __iomem *mem; 4564 const char *mac; 4565 struct macb *bp; 4566 int err, val; 4567 4568 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4569 mem = devm_ioremap_resource(&pdev->dev, regs); 4570 if (IS_ERR(mem)) 4571 return PTR_ERR(mem); 4572 4573 if (np) { 4574 const struct of_device_id *match; 4575 4576 match = of_match_node(macb_dt_ids, np); 4577 if (match && match->data) { 4578 macb_config = match->data; 4579 clk_init = macb_config->clk_init; 4580 init = macb_config->init; 4581 
} 4582 } 4583 4584 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); 4585 if (err) 4586 return err; 4587 4588 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); 4589 pm_runtime_use_autosuspend(&pdev->dev); 4590 pm_runtime_get_noresume(&pdev->dev); 4591 pm_runtime_set_active(&pdev->dev); 4592 pm_runtime_enable(&pdev->dev); 4593 native_io = hw_is_native_io(mem); 4594 4595 macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 4596 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 4597 if (!dev) { 4598 err = -ENOMEM; 4599 goto err_disable_clocks; 4600 } 4601 4602 dev->base_addr = regs->start; 4603 4604 SET_NETDEV_DEV(dev, &pdev->dev); 4605 4606 bp = netdev_priv(dev); 4607 bp->pdev = pdev; 4608 bp->dev = dev; 4609 bp->regs = mem; 4610 bp->native_io = native_io; 4611 if (native_io) { 4612 bp->macb_reg_readl = hw_readl_native; 4613 bp->macb_reg_writel = hw_writel_native; 4614 } else { 4615 bp->macb_reg_readl = hw_readl; 4616 bp->macb_reg_writel = hw_writel; 4617 } 4618 bp->num_queues = num_queues; 4619 bp->queue_mask = queue_mask; 4620 if (macb_config) 4621 bp->dma_burst_length = macb_config->dma_burst_length; 4622 bp->pclk = pclk; 4623 bp->hclk = hclk; 4624 bp->tx_clk = tx_clk; 4625 bp->rx_clk = rx_clk; 4626 bp->tsu_clk = tsu_clk; 4627 if (macb_config) 4628 bp->jumbo_max_len = macb_config->jumbo_max_len; 4629 4630 bp->wol = 0; 4631 if (of_get_property(np, "magic-packet", NULL)) 4632 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; 4633 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 4634 4635 spin_lock_init(&bp->lock); 4636 4637 /* setup capabilities */ 4638 macb_configure_caps(bp, macb_config); 4639 4640 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 4641 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 4642 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 4643 bp->hw_dma_cap |= HW_DMA_CAP_64B; 4644 } 4645 #endif 4646 platform_set_drvdata(pdev, dev); 4647 4648 dev->irq = platform_get_irq(pdev, 0); 4649 if (dev->irq < 0) { 4650 err = dev->irq; 4651 goto err_out_free_netdev; 4652 } 4653 4654 /* MTU range: 68 - 1500 or 10240 */ 4655 dev->min_mtu = GEM_MTU_MIN_SIZE; 4656 if (bp->caps & MACB_CAPS_JUMBO) 4657 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; 4658 else 4659 dev->max_mtu = ETH_DATA_LEN; 4660 4661 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { 4662 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); 4663 if (val) 4664 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * 4665 macb_dma_desc_get_size(bp); 4666 4667 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); 4668 if (val) 4669 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * 4670 macb_dma_desc_get_size(bp); 4671 } 4672 4673 bp->rx_intr_mask = MACB_RX_INT_FLAGS; 4674 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) 4675 bp->rx_intr_mask |= MACB_BIT(RXUBR); 4676 4677 mac = of_get_mac_address(np); 4678 if (PTR_ERR(mac) == -EPROBE_DEFER) { 4679 err = -EPROBE_DEFER; 4680 goto err_out_free_netdev; 4681 } else if (!IS_ERR_OR_NULL(mac)) { 4682 ether_addr_copy(bp->dev->dev_addr, mac); 4683 } else { 4684 macb_get_hwaddr(bp); 4685 } 4686 4687 err = of_get_phy_mode(np, &interface); 4688 if (err) 4689 /* not found in DT, MII by default */ 4690 bp->phy_interface = PHY_INTERFACE_MODE_MII; 4691 else 4692 bp->phy_interface = interface; 4693 4694 /* IP specific init */ 4695 err = init(pdev); 4696 if (err) 4697 goto err_out_free_netdev; 4698 4699 err = macb_mii_init(bp); 4700 if (err) 4701 goto err_out_free_netdev; 4702 4703 netif_carrier_off(dev); 4704 4705 err = register_netdev(dev); 4706 if (err) { 4707 dev_err(&pdev->dev, "Cannot 
register net device, aborting.\n"); 4708 goto err_out_unregister_mdio; 4709 } 4710 4711 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); 4712 4713 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 4714 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 4715 dev->base_addr, dev->irq, dev->dev_addr); 4716 4717 pm_runtime_mark_last_busy(&bp->pdev->dev); 4718 pm_runtime_put_autosuspend(&bp->pdev->dev); 4719 4720 return 0; 4721 4722 err_out_unregister_mdio: 4723 mdiobus_unregister(bp->mii_bus); 4724 mdiobus_free(bp->mii_bus); 4725 4726 err_out_free_netdev: 4727 free_netdev(dev); 4728 4729 err_disable_clocks: 4730 clk_disable_unprepare(tx_clk); 4731 clk_disable_unprepare(hclk); 4732 clk_disable_unprepare(pclk); 4733 clk_disable_unprepare(rx_clk); 4734 clk_disable_unprepare(tsu_clk); 4735 pm_runtime_disable(&pdev->dev); 4736 pm_runtime_set_suspended(&pdev->dev); 4737 pm_runtime_dont_use_autosuspend(&pdev->dev); 4738 4739 return err; 4740 } 4741 4742 static int macb_remove(struct platform_device *pdev) 4743 { 4744 struct net_device *dev; 4745 struct macb *bp; 4746 4747 dev = platform_get_drvdata(pdev); 4748 4749 if (dev) { 4750 bp = netdev_priv(dev); 4751 mdiobus_unregister(bp->mii_bus); 4752 mdiobus_free(bp->mii_bus); 4753 4754 unregister_netdev(dev); 4755 tasklet_kill(&bp->hresp_err_tasklet); 4756 pm_runtime_disable(&pdev->dev); 4757 pm_runtime_dont_use_autosuspend(&pdev->dev); 4758 if (!pm_runtime_suspended(&pdev->dev)) { 4759 clk_disable_unprepare(bp->tx_clk); 4760 clk_disable_unprepare(bp->hclk); 4761 clk_disable_unprepare(bp->pclk); 4762 clk_disable_unprepare(bp->rx_clk); 4763 clk_disable_unprepare(bp->tsu_clk); 4764 pm_runtime_set_suspended(&pdev->dev); 4765 } 4766 phylink_destroy(bp->phylink); 4767 free_netdev(dev); 4768 } 4769 4770 return 0; 4771 } 4772 4773 static int __maybe_unused macb_suspend(struct device *dev) 4774 { 4775 struct net_device *netdev = dev_get_drvdata(dev); 4776 struct macb *bp = netdev_priv(netdev); 4777 struct macb_queue *queue = bp->queues; 4778 unsigned long flags; 4779 unsigned int q; 4780 int err; 4781 4782 if (!netif_running(netdev)) 4783 return 0; 4784 4785 if (bp->wol & MACB_WOL_ENABLED) { 4786 spin_lock_irqsave(&bp->lock, flags); 4787 /* Flush all status bits */ 4788 macb_writel(bp, TSR, -1); 4789 macb_writel(bp, RSR, -1); 4790 for (q = 0, queue = bp->queues; q < bp->num_queues; 4791 ++q, ++queue) { 4792 /* Disable all interrupts */ 4793 queue_writel(queue, IDR, -1); 4794 queue_readl(queue, ISR); 4795 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 4796 queue_writel(queue, ISR, -1); 4797 } 4798 /* Change interrupt handler and 4799 * Enable WoL IRQ on queue 0 4800 */ 4801 devm_free_irq(dev, bp->queues[0].irq, bp->queues); 4802 if (macb_is_gem(bp)) { 4803 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, 4804 IRQF_SHARED, netdev->name, bp->queues); 4805 if (err) { 4806 dev_err(dev, 4807 "Unable to request IRQ %d (error %d)\n", 4808 bp->queues[0].irq, err); 4809 spin_unlock_irqrestore(&bp->lock, flags); 4810 return err; 4811 } 4812 queue_writel(bp->queues, IER, GEM_BIT(WOL)); 4813 gem_writel(bp, WOL, MACB_BIT(MAG)); 4814 } else { 4815 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, 4816 IRQF_SHARED, netdev->name, bp->queues); 4817 if (err) { 4818 dev_err(dev, 4819 "Unable to request IRQ %d (error %d)\n", 4820 bp->queues[0].irq, err); 4821 spin_unlock_irqrestore(&bp->lock, flags); 4822 return err; 4823 } 4824 queue_writel(bp->queues, IER, MACB_BIT(WOL)); 4825 macb_writel(bp, WOL, MACB_BIT(MAG)); 
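			/* In both the GEM and MACB cases the sequence is the
			 * same: the regular handler was released above, a
			 * dedicated WoL handler now owns queue 0's IRQ, and
			 * magic-packet detection is armed via the WOL
			 * register; macb_resume() reverses these steps.
			 */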
4826 } 4827 spin_unlock_irqrestore(&bp->lock, flags); 4828 4829 enable_irq_wake(bp->queues[0].irq); 4830 } 4831 4832 netif_device_detach(netdev); 4833 for (q = 0, queue = bp->queues; q < bp->num_queues; 4834 ++q, ++queue) 4835 napi_disable(&queue->napi); 4836 4837 if (!(bp->wol & MACB_WOL_ENABLED)) { 4838 rtnl_lock(); 4839 phylink_stop(bp->phylink); 4840 rtnl_unlock(); 4841 spin_lock_irqsave(&bp->lock, flags); 4842 macb_reset_hw(bp); 4843 spin_unlock_irqrestore(&bp->lock, flags); 4844 } 4845 4846 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 4847 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); 4848 4849 if (netdev->hw_features & NETIF_F_NTUPLE) 4850 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); 4851 4852 if (bp->ptp_info) 4853 bp->ptp_info->ptp_remove(netdev); 4854 if (!device_may_wakeup(dev)) 4855 pm_runtime_force_suspend(dev); 4856 4857 return 0; 4858 } 4859 4860 static int __maybe_unused macb_resume(struct device *dev) 4861 { 4862 struct net_device *netdev = dev_get_drvdata(dev); 4863 struct macb *bp = netdev_priv(netdev); 4864 struct macb_queue *queue = bp->queues; 4865 unsigned long flags; 4866 unsigned int q; 4867 int err; 4868 4869 if (!netif_running(netdev)) 4870 return 0; 4871 4872 if (!device_may_wakeup(dev)) 4873 pm_runtime_force_resume(dev); 4874 4875 if (bp->wol & MACB_WOL_ENABLED) { 4876 spin_lock_irqsave(&bp->lock, flags); 4877 /* Disable WoL */ 4878 if (macb_is_gem(bp)) { 4879 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); 4880 gem_writel(bp, WOL, 0); 4881 } else { 4882 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); 4883 macb_writel(bp, WOL, 0); 4884 } 4885 /* Clear ISR on queue 0 */ 4886 queue_readl(bp->queues, ISR); 4887 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 4888 queue_writel(bp->queues, ISR, -1); 4889 /* Replace interrupt handler on queue 0 */ 4890 devm_free_irq(dev, bp->queues[0].irq, bp->queues); 4891 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, 4892 IRQF_SHARED, netdev->name, bp->queues); 4893 if (err) { 4894 dev_err(dev, 4895 "Unable to request IRQ %d (error %d)\n", 4896 bp->queues[0].irq, err); 4897 spin_unlock_irqrestore(&bp->lock, flags); 4898 return err; 4899 } 4900 spin_unlock_irqrestore(&bp->lock, flags); 4901 4902 disable_irq_wake(bp->queues[0].irq); 4903 4904 /* Now make sure we disable phy before moving 4905 * to common restore path 4906 */ 4907 rtnl_lock(); 4908 phylink_stop(bp->phylink); 4909 rtnl_unlock(); 4910 } 4911 4912 for (q = 0, queue = bp->queues; q < bp->num_queues; 4913 ++q, ++queue) 4914 napi_enable(&queue->napi); 4915 4916 if (netdev->hw_features & NETIF_F_NTUPLE) 4917 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); 4918 4919 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 4920 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); 4921 4922 macb_writel(bp, NCR, MACB_BIT(MPE)); 4923 macb_init_hw(bp); 4924 macb_set_rx_mode(netdev); 4925 macb_restore_features(bp); 4926 rtnl_lock(); 4927 phylink_start(bp->phylink); 4928 rtnl_unlock(); 4929 4930 netif_device_attach(netdev); 4931 if (bp->ptp_info) 4932 bp->ptp_info->ptp_init(netdev); 4933 4934 return 0; 4935 } 4936 4937 static int __maybe_unused macb_runtime_suspend(struct device *dev) 4938 { 4939 struct net_device *netdev = dev_get_drvdata(dev); 4940 struct macb *bp = netdev_priv(netdev); 4941 4942 if (!(device_may_wakeup(dev))) { 4943 clk_disable_unprepare(bp->tx_clk); 4944 clk_disable_unprepare(bp->hclk); 4945 clk_disable_unprepare(bp->pclk); 4946 clk_disable_unprepare(bp->rx_clk); 4947 } 4948 clk_disable_unprepare(bp->tsu_clk); 4949 4950 return 0; 4951 } 4952 4953 
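/* Mirror image of macb_runtime_suspend() above: pclk, hclk, tx_clk and
 * rx_clk are only gated while runtime-suspended when the device may not
 * wake the system; when wake-up (e.g. WoL) is allowed, they stay running so
 * the controller can still detect the wake event. The TSU clock is gated
 * and re-enabled unconditionally.
 */
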
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");