1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Faraday FTGMAC100 Gigabit Ethernet 4 * 5 * (C) Copyright 2009-2011 Faraday Technology 6 * Po-Yu Chuang <ratbert@faraday-tech.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/clk.h> 12 #include <linux/reset.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/etherdevice.h> 15 #include <linux/ethtool.h> 16 #include <linux/interrupt.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/netdevice.h> 20 #include <linux/of.h> 21 #include <linux/of_mdio.h> 22 #include <linux/phy.h> 23 #include <linux/platform_device.h> 24 #include <linux/property.h> 25 #include <linux/crc32.h> 26 #include <linux/if_vlan.h> 27 #include <linux/of_net.h> 28 #include <linux/phy_fixed.h> 29 #include <net/ip.h> 30 #include <net/ncsi.h> 31 32 #include "ftgmac100.h" 33 34 #define DRV_NAME "ftgmac100" 35 36 enum ftgmac100_mac_id { 37 FTGMAC100_FARADAY = 1, 38 FTGMAC100_AST2400, 39 FTGMAC100_AST2500, 40 FTGMAC100_AST2600 41 }; 42 43 struct ftgmac100_match_data { 44 enum ftgmac100_mac_id mac_id; 45 }; 46 47 /* Arbitrary values, I am not sure the HW has limits */ 48 #define MAX_RX_QUEUE_ENTRIES 1024 49 #define MAX_TX_QUEUE_ENTRIES 1024 50 #define MIN_RX_QUEUE_ENTRIES 32 51 #define MIN_TX_QUEUE_ENTRIES 32 52 53 /* Defaults */ 54 #define DEF_RX_QUEUE_ENTRIES 128 55 #define DEF_TX_QUEUE_ENTRIES 128 56 57 #define MAX_PKT_SIZE 1536 58 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ 59 60 /* Min number of tx ring entries before stopping queue */ 61 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) 62 63 #define FTGMAC_100MHZ 100000000 64 #define FTGMAC_25MHZ 25000000 65 66 /* For NC-SI to register a fixed-link phy device */ 67 static struct fixed_phy_status ncsi_phy_status = { 68 .link = 1, 69 .speed = SPEED_100, 70 .duplex = DUPLEX_FULL, 71 .pause = 0, 72 .asym_pause = 0 73 }; 74 75 struct ftgmac100 { 76 /* Registers */ 77 struct resource *res; 78 void __iomem *base; 79 80 enum 
ftgmac100_mac_id mac_id; 81 82 /* Rx ring */ 83 unsigned int rx_q_entries; 84 struct ftgmac100_rxdes *rxdes; 85 dma_addr_t rxdes_dma; 86 struct sk_buff **rx_skbs; 87 unsigned int rx_pointer; 88 u32 rxdes0_edorr_mask; 89 90 /* Tx ring */ 91 unsigned int tx_q_entries; 92 struct ftgmac100_txdes *txdes; 93 dma_addr_t txdes_dma; 94 struct sk_buff **tx_skbs; 95 unsigned int tx_clean_pointer; 96 unsigned int tx_pointer; 97 u32 txdes0_edotr_mask; 98 99 /* Used to signal the reset task of ring change request */ 100 unsigned int new_rx_q_entries; 101 unsigned int new_tx_q_entries; 102 103 /* Scratch page to use when rx skb alloc fails */ 104 void *rx_scratch; 105 dma_addr_t rx_scratch_dma; 106 107 /* Component structures */ 108 struct net_device *netdev; 109 struct device *dev; 110 struct ncsi_dev *ndev; 111 struct napi_struct napi; 112 struct work_struct reset_task; 113 struct mii_bus *mii_bus; 114 struct clk *clk; 115 116 /* AST2500/AST2600 RMII ref clock gate */ 117 struct clk *rclk; 118 /* Aspeed reset control */ 119 struct reset_control *rst; 120 121 /* Link management */ 122 int cur_speed; 123 int cur_duplex; 124 bool use_ncsi; 125 126 /* Multicast filter settings */ 127 u32 maht0; 128 u32 maht1; 129 130 /* Flow control settings */ 131 bool tx_pause; 132 bool rx_pause; 133 bool aneg_pause; 134 135 /* Misc */ 136 bool need_mac_restart; 137 bool is_aspeed; 138 }; 139 140 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) 141 { 142 struct net_device *netdev = priv->netdev; 143 int i; 144 145 /* NOTE: reset clears all registers */ 146 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 147 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 148 priv->base + FTGMAC100_OFFSET_MACCR); 149 for (i = 0; i < 200; i++) { 150 unsigned int maccr; 151 152 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 153 if (!(maccr & FTGMAC100_MACCR_SW_RST)) 154 return 0; 155 156 udelay(1); 157 } 158 159 netdev_err(netdev, "Hardware reset failed\n"); 160 return -EIO; 161 } 162 163 
static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) 164 { 165 u32 maccr = 0; 166 167 /* Aspeed RMII needs SCU reset to clear status */ 168 if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) { 169 int err; 170 171 err = reset_control_assert(priv->rst); 172 if (err) { 173 dev_err(priv->dev, "Failed to reset mac (%d)\n", err); 174 return err; 175 } 176 usleep_range(10000, 20000); 177 err = reset_control_deassert(priv->rst); 178 if (err) { 179 dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err); 180 return err; 181 } 182 } 183 184 switch (priv->cur_speed) { 185 case SPEED_10: 186 case 0: /* no link */ 187 break; 188 189 case SPEED_100: 190 maccr |= FTGMAC100_MACCR_FAST_MODE; 191 break; 192 193 case SPEED_1000: 194 maccr |= FTGMAC100_MACCR_GIGA_MODE; 195 break; 196 default: 197 netdev_err(priv->netdev, "Unknown speed %d !\n", 198 priv->cur_speed); 199 break; 200 } 201 202 /* (Re)initialize the queue pointers */ 203 priv->rx_pointer = 0; 204 priv->tx_clean_pointer = 0; 205 priv->tx_pointer = 0; 206 207 /* The doc says reset twice with 10us interval */ 208 if (ftgmac100_reset_mac(priv, maccr)) 209 return -EIO; 210 usleep_range(10, 1000); 211 return ftgmac100_reset_mac(priv, maccr); 212 } 213 214 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) 215 { 216 unsigned int maddr = mac[0] << 8 | mac[1]; 217 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 218 219 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); 220 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); 221 } 222 223 static int ftgmac100_initial_mac(struct ftgmac100 *priv) 224 { 225 u8 mac[ETH_ALEN]; 226 unsigned int m; 227 unsigned int l; 228 int err; 229 230 err = of_get_ethdev_address(priv->dev->of_node, priv->netdev); 231 if (err == -EPROBE_DEFER) 232 return err; 233 if (!err) { 234 dev_info(priv->dev, "Read MAC address %pM from device tree\n", 235 priv->netdev->dev_addr); 236 return 0; 237 } 
238 239 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); 240 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); 241 242 mac[0] = (m >> 8) & 0xff; 243 mac[1] = m & 0xff; 244 mac[2] = (l >> 24) & 0xff; 245 mac[3] = (l >> 16) & 0xff; 246 mac[4] = (l >> 8) & 0xff; 247 mac[5] = l & 0xff; 248 249 if (is_valid_ether_addr(mac)) { 250 eth_hw_addr_set(priv->netdev, mac); 251 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); 252 } else { 253 eth_hw_addr_random(priv->netdev); 254 dev_info(priv->dev, "Generated random MAC address %pM\n", 255 priv->netdev->dev_addr); 256 } 257 258 return 0; 259 } 260 261 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) 262 { 263 int ret; 264 265 ret = eth_prepare_mac_addr_change(dev, p); 266 if (ret < 0) 267 return ret; 268 269 eth_commit_mac_addr_change(dev, p); 270 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); 271 272 return 0; 273 } 274 275 static void ftgmac100_config_pause(struct ftgmac100 *priv) 276 { 277 u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16); 278 279 /* Throttle tx queue when receiving pause frames */ 280 if (priv->rx_pause) 281 fcr |= FTGMAC100_FCR_FC_EN; 282 283 /* Enables sending pause frames when the RX queue is past a 284 * certain threshold. 
285 */ 286 if (priv->tx_pause) 287 fcr |= FTGMAC100_FCR_FCTHR_EN; 288 289 iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR); 290 } 291 292 static void ftgmac100_init_hw(struct ftgmac100 *priv) 293 { 294 u32 reg, rfifo_sz, tfifo_sz; 295 296 /* Clear stale interrupts */ 297 reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR); 298 iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); 299 300 /* Setup RX ring buffer base */ 301 iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); 302 303 /* Setup TX ring buffer base */ 304 iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); 305 306 /* Configure RX buffer size */ 307 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), 308 priv->base + FTGMAC100_OFFSET_RBSR); 309 310 /* Set RX descriptor autopoll */ 311 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), 312 priv->base + FTGMAC100_OFFSET_APTC); 313 314 /* Write MAC address */ 315 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); 316 317 /* Write multicast filter */ 318 iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0); 319 iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1); 320 321 /* Configure descriptor sizes and increase burst sizes according 322 * to values in Aspeed SDK. The FIFO arbitration is enabled and 323 * the thresholds set based on the recommended values in the 324 * AST2400 specification. 325 */ 326 iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */ 327 FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */ 328 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */ 329 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */ 330 FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */ 331 FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */ 332 FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */ 333 priv->base + FTGMAC100_OFFSET_DBLAC); 334 335 /* Interrupt mitigation configured for 1 interrupt/packet. 
HW interrupt 336 * mitigation doesn't seem to provide any benefit with NAPI so leave 337 * it at that. 338 */ 339 iowrite32(FTGMAC100_ITC_RXINT_THR(1) | 340 FTGMAC100_ITC_TXINT_THR(1), 341 priv->base + FTGMAC100_OFFSET_ITC); 342 343 /* Configure FIFO sizes in the TPAFCR register */ 344 reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR); 345 rfifo_sz = reg & 0x00000007; 346 tfifo_sz = (reg >> 3) & 0x00000007; 347 reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR); 348 reg &= ~0x3f000000; 349 reg |= (tfifo_sz << 27); 350 reg |= (rfifo_sz << 24); 351 iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR); 352 } 353 354 static void ftgmac100_start_hw(struct ftgmac100 *priv) 355 { 356 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 357 358 /* Keep the original GMAC and FAST bits */ 359 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); 360 361 /* Add all the main enable bits */ 362 maccr |= FTGMAC100_MACCR_TXDMA_EN | 363 FTGMAC100_MACCR_RXDMA_EN | 364 FTGMAC100_MACCR_TXMAC_EN | 365 FTGMAC100_MACCR_RXMAC_EN | 366 FTGMAC100_MACCR_CRC_APD | 367 FTGMAC100_MACCR_PHY_LINK_LEVEL | 368 FTGMAC100_MACCR_RX_RUNT | 369 FTGMAC100_MACCR_RX_BROADPKT; 370 371 /* Add other bits as needed */ 372 if (priv->cur_duplex == DUPLEX_FULL) 373 maccr |= FTGMAC100_MACCR_FULLDUP; 374 if (priv->netdev->flags & IFF_PROMISC) 375 maccr |= FTGMAC100_MACCR_RX_ALL; 376 if (priv->netdev->flags & IFF_ALLMULTI) 377 maccr |= FTGMAC100_MACCR_RX_MULTIPKT; 378 else if (netdev_mc_count(priv->netdev)) 379 maccr |= FTGMAC100_MACCR_HT_MULTI_EN; 380 381 /* Vlan filtering enabled */ 382 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 383 maccr |= FTGMAC100_MACCR_RM_VLAN; 384 385 /* Hit the HW */ 386 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 387 } 388 389 static void ftgmac100_stop_hw(struct ftgmac100 *priv) 390 { 391 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); 392 } 393 394 static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv) 395 { 396 struct netdev_hw_addr 
*ha;

	priv->maht1 = 0;
	priv->maht0 = 0;
	netdev_for_each_mc_addr(ha, priv->netdev) {
		u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);

		/* The hash index is the top 6 bits of the inverted CRC */
		crc_val = (~(crc_val >> 2)) & 0x3f;
		if (crc_val >= 32)
			priv->maht1 |= 1ul << (crc_val - 32);
		else
			priv->maht0 |= 1ul << (crc_val);
	}
}

/* ndo_set_rx_mode: recompute the multicast hash and, if the interface is
 * up, push the new filter and MACCR settings to the hardware.
 */
static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Setup the hash filter */
	ftgmac100_calc_mc_hash(priv);

	/* Interface down ? that's all there is to do */
	if (!netif_running(netdev))
		return;

	/* Update the HW */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Reconfigure MACCR */
	ftgmac100_start_hw(priv);
}

/* Attach a fresh skb (or, on allocation/mapping failure, the scratch
 * buffer) to RX descriptor @entry and hand it back to the hardware.
 *
 * Returns 0 on success, -ENOMEM when falling back to the scratch page
 * (the descriptor is still made usable so the ring keeps running).
 */
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		/* Point the HW at the scratch page so it can still DMA */
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit) */
	if (entry == (priv->rx_q_entries - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}

/* Advance an RX ring index with power-of-2 wraparound */
static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
					      unsigned int pointer)
{
	return (pointer + 1) & (priv->rx_q_entries - 1);
}

/* Account an errored RX descriptor into the netdev statistics */
static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
	struct net_device *netdev = priv->netdev;

	if (status & FTGMAC100_RXDES0_RX_ERR)
		netdev->stats.rx_errors++;

	if (status & FTGMAC100_RXDES0_CRC_ERR)
		netdev->stats.rx_crc_errors++;

	if (status & (FTGMAC100_RXDES0_FTL |
		      FTGMAC100_RXDES0_RUNT |
		      FTGMAC100_RXDES0_RX_ODD_NB))
		netdev->stats.rx_length_errors++;
}

/* Process one RX descriptor. Returns false when the ring is empty,
 * true when a descriptor was consumed (delivered or dropped) and the
 * caller should keep polling. *processed counts delivered packets.
 */
static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftgmac100_rxdes *rxdes;
	struct sk_buff *skb;
	unsigned int pointer, size;
	u32 status, csum_vlan;
	dma_addr_t map;

	/* Grab next RX descriptor */
	pointer = priv->rx_pointer;
	rxdes = &priv->rxdes[pointer];

	/* Grab descriptor status */
	status = le32_to_cpu(rxdes->rxdes0);

	/* Do we have a packet ? */
	if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
		return false;

	/* Order subsequent reads with the test for the ready bit */
	dma_rmb();

	/* We don't cope with fragmented RX packets */
	if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
		     !(status & FTGMAC100_RXDES0_LRS)))
		goto drop;

	/* Grab received size and csum vlan field in the descriptor */
	size = status & FTGMAC100_RXDES0_VDBC;
	csum_vlan = le32_to_cpu(rxdes->rxdes1);

	/* Any error (other than csum offload) flagged ? */
	if (unlikely(status & RXDES0_ANY_ERROR)) {
		/* Correct for incorrect flagging of runt packets
		 * with vlan tags... Just accept a runt packet that
		 * has been flagged as vlan and whose size is at
		 * least 60 bytes.
535 */ 536 if ((status & FTGMAC100_RXDES0_RUNT) && 537 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && 538 (size >= 60)) 539 status &= ~FTGMAC100_RXDES0_RUNT; 540 541 /* Any error still in there ? */ 542 if (status & RXDES0_ANY_ERROR) { 543 ftgmac100_rx_packet_error(priv, status); 544 goto drop; 545 } 546 } 547 548 /* If the packet had no skb (failed to allocate earlier) 549 * then try to allocate one and skip 550 */ 551 skb = priv->rx_skbs[pointer]; 552 if (!unlikely(skb)) { 553 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 554 goto drop; 555 } 556 557 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) 558 netdev->stats.multicast++; 559 560 /* If the HW found checksum errors, bounce it to software. 561 * 562 * If we didn't, we need to see if the packet was recognized 563 * by HW as one of the supported checksummed protocols before 564 * we accept the HW test results. 565 */ 566 if (netdev->features & NETIF_F_RXCSUM) { 567 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | 568 FTGMAC100_RXDES1_UDP_CHKSUM_ERR | 569 FTGMAC100_RXDES1_IP_CHKSUM_ERR; 570 if ((csum_vlan & err_bits) || 571 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) 572 skb->ip_summed = CHECKSUM_NONE; 573 else 574 skb->ip_summed = CHECKSUM_UNNECESSARY; 575 } 576 577 /* Transfer received size to skb */ 578 skb_put(skb, size); 579 580 /* Extract vlan tag */ 581 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 582 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL)) 583 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 584 csum_vlan & 0xffff); 585 586 /* Tear down DMA mapping, do necessary cache management */ 587 map = le32_to_cpu(rxdes->rxdes3); 588 589 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) 590 /* When we don't have an iommu, we can save cycles by not 591 * invalidating the cache for the part of the packet that 592 * wasn't received. 
593 */ 594 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); 595 #else 596 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 597 #endif 598 599 600 /* Resplenish rx ring */ 601 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); 602 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 603 604 skb->protocol = eth_type_trans(skb, netdev); 605 606 netdev->stats.rx_packets++; 607 netdev->stats.rx_bytes += size; 608 609 /* push packet to protocol stack */ 610 if (skb->ip_summed == CHECKSUM_NONE) 611 netif_receive_skb(skb); 612 else 613 napi_gro_receive(&priv->napi, skb); 614 615 (*processed)++; 616 return true; 617 618 drop: 619 /* Clean rxdes0 (which resets own bit) */ 620 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); 621 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); 622 netdev->stats.rx_dropped++; 623 return true; 624 } 625 626 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, 627 unsigned int index) 628 { 629 if (index == (priv->tx_q_entries - 1)) 630 return priv->txdes0_edotr_mask; 631 else 632 return 0; 633 } 634 635 static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv, 636 unsigned int pointer) 637 { 638 return (pointer + 1) & (priv->tx_q_entries - 1); 639 } 640 641 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) 642 { 643 /* Returns the number of available slots in the TX queue 644 * 645 * This always leaves one free slot so we don't have to 646 * worry about empty vs. 
full, and this simplifies the 647 * test for ftgmac100_tx_buf_cleanable() below 648 */ 649 return (priv->tx_clean_pointer - priv->tx_pointer - 1) & 650 (priv->tx_q_entries - 1); 651 } 652 653 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) 654 { 655 return priv->tx_pointer != priv->tx_clean_pointer; 656 } 657 658 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, 659 unsigned int pointer, 660 struct sk_buff *skb, 661 struct ftgmac100_txdes *txdes, 662 u32 ctl_stat) 663 { 664 dma_addr_t map = le32_to_cpu(txdes->txdes3); 665 size_t len; 666 667 if (ctl_stat & FTGMAC100_TXDES0_FTS) { 668 len = skb_headlen(skb); 669 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); 670 } else { 671 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); 672 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); 673 } 674 675 /* Free SKB on last segment */ 676 if (ctl_stat & FTGMAC100_TXDES0_LTS) 677 dev_kfree_skb(skb); 678 priv->tx_skbs[pointer] = NULL; 679 } 680 681 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) 682 { 683 struct net_device *netdev = priv->netdev; 684 struct ftgmac100_txdes *txdes; 685 struct sk_buff *skb; 686 unsigned int pointer; 687 u32 ctl_stat; 688 689 pointer = priv->tx_clean_pointer; 690 txdes = &priv->txdes[pointer]; 691 692 ctl_stat = le32_to_cpu(txdes->txdes0); 693 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) 694 return false; 695 696 skb = priv->tx_skbs[pointer]; 697 netdev->stats.tx_packets++; 698 netdev->stats.tx_bytes += skb->len; 699 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); 700 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); 701 702 /* Ensure the descriptor config is visible before setting the tx 703 * pointer. 
	 */
	smp_wmb();

	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);

	return true;
}

/* Reclaim all completed TX descriptors and restart the queue if it was
 * stopped and enough room has been freed (mirrors the stop logic in
 * ftgmac100_hard_start_xmit()).
 */
static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;

	/* Process all completed packets */
	while (ftgmac100_tx_buf_cleanable(priv) &&
	       ftgmac100_tx_complete_packet(priv))
		;

	/* Restart queue if needed */
	smp_mb();
	if (unlikely(netif_queue_stopped(netdev) &&
		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
		struct netdev_queue *txq;

		/* Re-check under the tx lock to close the race with xmit */
		txq = netdev_get_tx_queue(netdev, 0);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(netdev) &&
		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
		__netif_tx_unlock(txq);
	}
}

/* Build the TXDES1 checksum-offload bits for @skb. Falls back to a
 * software checksum (skb_checksum_help) for protocols the HW doesn't
 * handle. Returns false if the skb should be dropped.
 */
static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
{
	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		u8 ip_proto = ip_hdr(skb)->protocol;

		*csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
		switch (ip_proto) {
		case IPPROTO_TCP:
			*csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
			return true;
		case IPPROTO_UDP:
			*csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
			return true;
		case IPPROTO_IP:
			return true;
		}
	}
	return skb_checksum_help(skb) == 0;
}

/* ndo_start_xmit: map the skb head and fragments onto TX descriptors,
 * then hand them to the DMA engine. The OWN bit of the first descriptor
 * is written last so the HW never sees a half-built chain.
 */
static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct ftgmac100_txdes *txdes, *first;
	unsigned int pointer, nfrags, len, i, j;
	u32 f_ctl_stat, ctl_stat, csum_vlan;
	dma_addr_t map;

	/* The HW doesn't pad small frames */
	if (eth_skb_pad(skb)) {
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Reject oversize packets */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		if (net_ratelimit())
			netdev_dbg(netdev, "tx packet too big\n");
		goto drop;
	}

	/* Do we have a limit on #fragments ? I yet have to get a reply
	 * from Aspeed. If there's one I haven't hit it.
	 */
	nfrags = skb_shinfo(skb)->nr_frags;

	/* Setup HW checksumming */
	csum_vlan = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
		goto drop;

	/* Add VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
	}

	/* Get header len */
	len = skb_headlen(skb);

	/* Map the packet head */
	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, map)) {
		if (net_ratelimit())
			netdev_err(netdev, "map tx packet head failed\n");
		goto drop;
	}

	/* Grab the next free tx descriptor */
	pointer = priv->tx_pointer;
	txdes = first = &priv->txdes[pointer];

	/* Setup it up with the packet head. Don't write the head to the
	 * ring just yet
	 */
	priv->tx_skbs[pointer] = skb;
	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
	if (nfrags == 0)
		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
	txdes->txdes3 = cpu_to_le32(map);
	txdes->txdes1 = cpu_to_le32(csum_vlan);

	/* Next descriptor */
	pointer = ftgmac100_next_tx_pointer(priv, pointer);

	/* Add the fragments */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		/* Map it */
		map = skb_frag_dma_map(priv->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, map))
			goto dma_err;

		/* Setup descriptor */
		priv->tx_skbs[pointer] = skb;
		txdes = &priv->txdes[pointer];
		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
		if (i == (nfrags - 1))
			ctl_stat |= FTGMAC100_TXDES0_LTS;
		txdes->txdes0 = cpu_to_le32(ctl_stat);
		txdes->txdes1 = 0;
		txdes->txdes3 = cpu_to_le32(map);

		/* Next one */
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
	}

	/* Order the previous packet and descriptor updates
	 * before setting the OWN bit on the first descriptor.
	 */
	dma_wmb();
	first->txdes0 = cpu_to_le32(f_ctl_stat);

	/* Ensure the descriptor config is visible before setting the tx
	 * pointer.
	 */
	smp_wmb();

	/* Update next TX pointer */
	priv->tx_pointer = pointer;

	/* If there isn't enough room for all the fragments of a new packet
	 * in the TX ring, stop the queue. The sequence below is race free
	 * vs. a concurrent restart in ftgmac100_poll()
	 */
	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
		netif_stop_queue(netdev);
		/* Order the queue stop with the test below */
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}

	/* Poke transmitter to read the updated TX descriptors */
	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);

	return NETDEV_TX_OK;

 dma_err:
	if (net_ratelimit())
		netdev_err(netdev, "map tx fragment failed\n");

	/* Free head */
	pointer = priv->tx_pointer;
	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);

	/* Then all fragments */
	for (j = 0; j < i; j++) {
		pointer = ftgmac100_next_tx_pointer(priv, pointer);
		txdes = &priv->txdes[pointer];
		ctl_stat = le32_to_cpu(txdes->txdes0);
		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
	}

	/* This cannot be reached if we successfully mapped the
	 * last fragment, so we know ftgmac100_free_tx_packet()
	 * hasn't freed the skb yet.
	 */
 drop:
	/* Drop the packet */
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/* Unmap and free every RX skb and every in-flight TX skb. Called with
 * the rings quiesced (device stopped or being reset).
 */
static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
	int i;

	/* Free all RX buffers */
	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
		struct sk_buff *skb = priv->rx_skbs[i];
		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);

		if (!skb)
			continue;

		priv->rx_skbs[i] = NULL;
		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	/* Free all TX buffers */
	for (i = 0; i < priv->tx_q_entries; i++) {
		struct ftgmac100_txdes *txdes = &priv->txdes[i];
		struct sk_buff *skb = priv->tx_skbs[i];

		if (!skb)
			continue;
		ftgmac100_free_tx_packet(priv, i, skb, txdes,
					 le32_to_cpu(txdes->txdes0));
	}
}

/* Release the descriptor rings, the skb pointer arrays and the scratch
 * buffer. Safe to call on a partially-allocated state (NULL checks).
 */
static void ftgmac100_free_rings(struct ftgmac100 *priv)
{
	/* Free skb arrays */
	kfree(priv->rx_skbs);
	kfree(priv->tx_skbs);

	/* Free descriptors */
	if (priv->rxdes)
		dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_rxdes),
				  priv->rxdes, priv->rxdes_dma);
	priv->rxdes = NULL;

	if (priv->txdes)
		dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
				  sizeof(struct ftgmac100_txdes),
				  priv->txdes, priv->txdes_dma);
	priv->txdes = NULL;

	/* Free scratch packet buffer */
	if (priv->rx_scratch)
		dma_free_coherent(priv->dev, RX_BUF_SIZE,
				  priv->rx_scratch, priv->rx_scratch_dma);
}

/* Allocate the skb arrays, both descriptor rings (always sized for the
 * maximum so ethtool resizes need no reallocation) and the RX scratch
 * buffer. On failure the caller cleans up via ftgmac100_free_rings().
 */
static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
	/* Allocate skb arrays */
	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->rx_skbs)
		return -ENOMEM;
	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
				GFP_KERNEL);
	if (!priv->tx_skbs)
		return -ENOMEM;

	/* Allocate
descriptors */ 983 priv->rxdes = dma_alloc_coherent(priv->dev, 984 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), 985 &priv->rxdes_dma, GFP_KERNEL); 986 if (!priv->rxdes) 987 return -ENOMEM; 988 priv->txdes = dma_alloc_coherent(priv->dev, 989 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), 990 &priv->txdes_dma, GFP_KERNEL); 991 if (!priv->txdes) 992 return -ENOMEM; 993 994 /* Allocate scratch packet buffer */ 995 priv->rx_scratch = dma_alloc_coherent(priv->dev, 996 RX_BUF_SIZE, 997 &priv->rx_scratch_dma, 998 GFP_KERNEL); 999 if (!priv->rx_scratch) 1000 return -ENOMEM; 1001 1002 return 0; 1003 } 1004 1005 static void ftgmac100_init_rings(struct ftgmac100 *priv) 1006 { 1007 struct ftgmac100_rxdes *rxdes = NULL; 1008 struct ftgmac100_txdes *txdes = NULL; 1009 int i; 1010 1011 /* Update entries counts */ 1012 priv->rx_q_entries = priv->new_rx_q_entries; 1013 priv->tx_q_entries = priv->new_tx_q_entries; 1014 1015 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) 1016 return; 1017 1018 /* Initialize RX ring */ 1019 for (i = 0; i < priv->rx_q_entries; i++) { 1020 rxdes = &priv->rxdes[i]; 1021 rxdes->rxdes0 = 0; 1022 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); 1023 } 1024 /* Mark the end of the ring */ 1025 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); 1026 1027 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) 1028 return; 1029 1030 /* Initialize TX ring */ 1031 for (i = 0; i < priv->tx_q_entries; i++) { 1032 txdes = &priv->txdes[i]; 1033 txdes->txdes0 = 0; 1034 } 1035 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); 1036 } 1037 1038 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) 1039 { 1040 int i; 1041 1042 for (i = 0; i < priv->rx_q_entries; i++) { 1043 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; 1044 1045 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) 1046 return -ENOMEM; 1047 } 1048 return 0; 1049 } 1050 1051 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	/* Program PHY address + register and kick off a read */
	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIRD;

	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	/* Poll for completion: MIIRD self-clears when the cycle is done.
	 * Up to 10 x 100us = 1ms before giving up.
	 */
	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
			int data;

			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}

		udelay(100);
	}

	netdev_err(netdev, "mdio read timed out\n");
	return -EIO;
}

/* Write a PHY register via the MAC's built-in MDIO controller.
 * Same poll-for-completion scheme as the read path, keyed on MIIWR.
 * Returns 0 on success or -EIO on timeout.
 */
static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
				   int regnum, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int phycr;
	int data;
	int i;

	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

	/* preserve MDC cycle threshold */
	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;

	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
		 FTGMAC100_PHYCR_REGAD(regnum) |
		 FTGMAC100_PHYCR_MIIWR;

	data = FTGMAC100_PHYDATA_MIIWDATA(value);

	/* Data must be loaded before the write cycle is triggered */
	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);

	for (i = 0; i < 10; i++) {
		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);

		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
			return 0;

		udelay(100);
	}

	netdev_err(netdev, "mdio write timed out\n");
	return -EIO;
}

/* ethtool -i: report driver name and bus identifier */
static void ftgmac100_get_drvinfo(struct net_device *netdev,
				  struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}

/* ethtool -g: report ring size limits and current sizes */
static void
ftgmac100_get_ringparam(struct net_device *netdev,
			struct ethtool_ringparam *ering,
			struct kernel_ethtool_ringparam *kernel_ering,
			struct netlink_ext_ack *extack)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	memset(ering, 0, sizeof(*ering));
	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
	ering->rx_pending = priv->rx_q_entries;
	ering->tx_pending = priv->tx_q_entries;
}

/* ethtool -G: validate and stage new ring sizes. Sizes must be within
 * [MIN, MAX] and a power of two. The actual resize is done by the
 * reset task (which re-runs ftgmac100_init_rings) if the interface is
 * up; otherwise it takes effect on the next open.
 */
static int
ftgmac100_set_ringparam(struct net_device *netdev,
			struct ethtool_ringparam *ering,
			struct kernel_ethtool_ringparam *kernel_ering,
			struct netlink_ext_ack *extack)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
	    ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
	    ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
	    ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
	    !is_power_of_2(ering->rx_pending) ||
	    !is_power_of_2(ering->tx_pending))
		return -EINVAL;

	priv->new_rx_q_entries = ering->rx_pending;
	priv->new_tx_q_entries = ering->tx_pending;
	if (netif_running(netdev))
		schedule_work(&priv->reset_task);

	return 0;
}

/* ethtool -a: report current pause (flow control) settings */
static void ftgmac100_get_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	pause->autoneg = priv->aneg_pause;
	pause->tx_pause = priv->tx_pause;
	pause->rx_pause = priv->rx_pause;
}

/* ethtool -A: update pause settings, propagating to the PHY when one
 * is attached, and to the MAC immediately unless autoneg will do it.
 */
static int ftgmac100_set_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	priv->aneg_pause = pause->autoneg;
	priv->tx_pause = pause->tx_pause;
	priv->rx_pause = pause->rx_pause;

	if (phydev)
		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);

	/* With a PHY and autoneg enabled, the adjust_link callback will
	 * apply the negotiated result; otherwise program the MAC now.
	 */
	if (netif_running(netdev)) {
		if (!(phydev && priv->aneg_pause))
			ftgmac100_config_pause(priv);
	}

	return 0;
}

static const struct ethtool_ops ftgmac100_ethtool_ops = {
	.get_drvinfo		= ftgmac100_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_ringparam		= ftgmac100_get_ringparam,
	.set_ringparam		= ftgmac100_set_ringparam,
	.get_pauseparam		= ftgmac100_get_pauseparam,
	.set_pauseparam		= ftgmac100_set_pauseparam,
};

/* Interrupt handler: ack all pending bits, account error conditions,
 * then mask everything except the "bad" ones and hand off to NAPI.
 * An AHB bus error is unrecoverable inline and triggers the reset task.
 */
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);
	unsigned int status, new_mask = FTGMAC100_INT_BAD;

	/* Fetch and clear interrupt bits, process abnormal ones */
	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
	if (unlikely(status & FTGMAC100_INT_BAD)) {

		/* RX buffer unavailable */
		if (status & FTGMAC100_INT_NO_RXBUF)
			netdev->stats.rx_over_errors++;

		/* received packet lost due to RX FIFO full */
		if (status & FTGMAC100_INT_RPKT_LOST)
			netdev->stats.rx_fifo_errors++;

		/* sent packet lost due to excessive TX collision */
		if (status & FTGMAC100_INT_XPKT_LOST)
			netdev->stats.tx_fifo_errors++;

		/* AHB error -> Reset the chip */
		if (status & FTGMAC100_INT_AHB_ERR) {
			if (net_ratelimit())
				netdev_warn(netdev,
					    "AHB bus error ! Resetting chip.\n");
			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
			schedule_work(&priv->reset_task);
			return IRQ_HANDLED;
		}

		/* We may need to restart the MAC after such errors, delay
		 * this until after we have freed some Rx buffers though
		 */
		priv->need_mac_restart = true;

		/* Disable those errors until we restart */
		new_mask &= ~status;
	}

	/* Only enable "bad" interrupts while NAPI is on */
	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);

	/* Schedule NAPI bh */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}

/* Is a received packet ready at the current RX ring position? */
static bool ftgmac100_check_rx(struct ftgmac100 *priv)
{
	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];

	/* Do we have a packet ? */
	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
}

/* NAPI poll: reap TX completions, receive up to 'budget' packets,
 * optionally restart the MAC after an error, and re-enable interrupts
 * only once both rings are verifiably idle (re-checking after the
 * latched ISR bits are cleared to avoid losing an event).
 */
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
	int work_done = 0;
	bool more;

	/* Handle TX completions */
	if (ftgmac100_tx_buf_cleanable(priv))
		ftgmac100_tx_complete(priv);

	/* Handle RX packets */
	do {
		more = ftgmac100_rx_packet(priv, &work_done);
	} while (more && work_done < budget);

	/* The interrupt is telling us to kick the MAC back to life
	 * after an RX overflow
	 */
	if (unlikely(priv->need_mac_restart)) {
		ftgmac100_start_hw(priv);
		priv->need_mac_restart = false;

		/* Re-enable "bad" interrupts */
		iowrite32(FTGMAC100_INT_BAD,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	/* As long as we are waiting for transmit packets to be
	 * completed we keep NAPI going
	 */
	if (ftgmac100_tx_buf_cleanable(priv))
		work_done = budget;

	if (work_done < budget) {
		/* We are about to re-enable all interrupts. However
		 * the HW has been latching RX/TX packet interrupts while
		 * they were masked. So we clear them first, then we need
		 * to re-check if there's something to process
		 */
		iowrite32(FTGMAC100_INT_RXTX,
			  priv->base + FTGMAC100_OFFSET_ISR);

		/* Push the above (and provides a barrier vs. subsequent
		 * reads of the descriptor).
		 */
		ioread32(priv->base + FTGMAC100_OFFSET_ISR);

		/* Check RX and TX descriptors for more work to do */
		if (ftgmac100_check_rx(priv) ||
		    ftgmac100_tx_buf_cleanable(priv))
			return budget;

		/* deschedule NAPI */
		napi_complete(napi);

		/* enable all interrupts */
		iowrite32(FTGMAC100_INT_ALL,
			  priv->base + FTGMAC100_OFFSET_IER);
	}

	return work_done;
}

/* Bring rings, HW, NAPI and the queue up; common to open and reset.
 * When ignore_alloc_err is set (reset path), an RX buffer allocation
 * failure is tolerated — missing buffers fall back to the scratch page.
 */
static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
	int err = 0;

	/* Re-init descriptors (adjust queue sizes) */
	ftgmac100_init_rings(priv);

	/* Realloc rx descriptors */
	err = ftgmac100_alloc_rx_buffers(priv);
	if (err && !ignore_alloc_err)
		return err;

	/* Reinit and restart HW */
	ftgmac100_init_hw(priv);
	ftgmac100_config_pause(priv);
	ftgmac100_start_hw(priv);

	/* Re-enable the device */
	napi_enable(&priv->napi);
	netif_start_queue(priv->netdev);

	/* Enable all interrupts */
	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);

	return err;
}

/* Full controller reset: quiesce the stack, reset and reprogram the
 * MAC, free and rebuild all buffers. Runs under rtnl + phydev lock +
 * mdio lock (taken in that order — adjust_link relies on this).
 */
static void ftgmac100_reset(struct ftgmac100 *priv)
{
	struct net_device *netdev = priv->netdev;
	int err;

	netdev_dbg(netdev, "Resetting NIC...\n");

	/* Lock the world */
	rtnl_lock();
	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);
	if (priv->mii_bus)
		mutex_lock(&priv->mii_bus->mdio_lock);

	/* Check if the interface is still up */
	if (!netif_running(netdev))
		goto bail;

	/* Stop
 the network stack */
	netif_trans_update(netdev);
	napi_disable(&priv->napi);
	netif_tx_disable(netdev);

	/* Stop and reset the MAC */
	ftgmac100_stop_hw(priv);
	err = ftgmac100_reset_and_config_mac(priv);
	if (err) {
		/* Not much we can do ... it might come back... */
		netdev_err(netdev, "attempting to continue...\n");
	}

	/* Free all rx and tx buffers */
	ftgmac100_free_buffers(priv);

	/* Setup everything again and restart chip */
	ftgmac100_init_all(priv, true);

	netdev_dbg(netdev, "Reset done !\n");
bail:
	if (priv->mii_bus)
		mutex_unlock(&priv->mii_bus->mdio_lock);
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);
	rtnl_unlock();
}

/* Workqueue wrapper so the reset can run in process context */
static void ftgmac100_reset_task(struct work_struct *work)
{
	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
					      reset_task);

	ftgmac100_reset(priv);
}

/* phylib link-change callback: capture the new speed/duplex/pause
 * state and, if anything actually changed and the link is up, reset
 * the chip to apply it. Called with phydev->lock held by phylib.
 */
static void ftgmac100_adjust_link(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool tx_pause, rx_pause;
	int new_speed;

	/* We store "no link" as speed 0 */
	if (!phydev->link)
		new_speed = 0;
	else
		new_speed = phydev->speed;

	/* Grab pause settings from PHY if configured to do so */
	if (priv->aneg_pause) {
		rx_pause = tx_pause = phydev->pause;
		if (phydev->asym_pause)
			tx_pause = !rx_pause;
	} else {
		rx_pause = priv->rx_pause;
		tx_pause = priv->tx_pause;
	}

	/* Link hasn't changed, do nothing */
	if (phydev->speed == priv->cur_speed &&
	    phydev->duplex == priv->cur_duplex &&
	    rx_pause == priv->rx_pause &&
	    tx_pause == priv->tx_pause)
		return;

	/* Print status if we have a link or we had one and just lost it,
	 * don't print otherwise.
	 */
	if (new_speed || priv->cur_speed)
		phy_print_status(phydev);

	priv->cur_speed = new_speed;
	priv->cur_duplex = phydev->duplex;
	priv->rx_pause = rx_pause;
	priv->tx_pause = tx_pause;

	/* Link is down, do nothing else */
	if (!new_speed)
		return;

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
	 * order consistent to prevent dead lock.
	 */
	if (netdev->phydev)
		mutex_unlock(&netdev->phydev->lock);

	ftgmac100_reset(priv);

	if (netdev->phydev)
		mutex_lock(&netdev->phydev->lock);

}

/* Scan our own MDIO bus for a PHY and attach to the first one found.
 * Used for legacy device-trees without a phy-handle/"mdio" node.
 */
static int ftgmac100_mii_probe(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	struct device_node *np = pdev->dev.of_node;
	struct phy_device *phydev;
	phy_interface_t phy_intf;
	int err;

	if (!priv->mii_bus) {
		dev_err(priv->dev, "No MDIO bus available\n");
		return -ENODEV;
	}

	/* Default to RGMII. It's a gigabit part after all */
	err = of_get_phy_mode(np, &phy_intf);
	if (err)
		phy_intf = PHY_INTERFACE_MODE_RGMII;

	/* Aspeed only supports these. I don't know about other IP
	 * block vendors so I'm going to just let them through for
	 * now. Note that this is only a warning if for some obscure
	 * reason the DT really means to lie about it or it's a newer
	 * part we don't know about.
	 *
	 * On the Aspeed SoC there are additionally straps and SCU
	 * control bits that could tell us what the interface is
	 * (or allow us to configure it while the IP block is held
	 * in reset).
 For now I chose to keep this driver away from
	 * those SoC specific bits and assume the device-tree is
	 * right and the SCU has been configured properly by pinmux
	 * or the firmware.
	 */
	if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
		netdev_warn(netdev,
			    "Unsupported PHY mode %s !\n",
			    phy_modes(phy_intf));
	}

	phydev = phy_find_first(priv->mii_bus);
	if (!phydev) {
		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &ftgmac100_adjust_link, phy_intf);

	if (IS_ERR(phydev)) {
		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
		return PTR_ERR(phydev);
	}

	/* Indicate that we support PAUSE frames (see comment in
	 * Documentation/networking/phy.rst)
	 */
	phy_support_asym_pause(phydev);

	/* Display what we found */
	phy_attached_info(phydev);

	return 0;
}

/* ndo_open: allocate rings, reset the MAC, hook the interrupt, start
 * the datapath and then kick off either the PHY state machine or the
 * NC-SI stack. Error paths unwind in strict reverse order.
 */
static int ftgmac100_open(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	int err;

	/* Allocate ring buffers */
	err = ftgmac100_alloc_rings(priv);
	if (err) {
		netdev_err(netdev, "Failed to allocate descriptors\n");
		return err;
	}

	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
	 *
	 * Otherwise we leave it set to 0 (no link), the link
	 * message from the PHY layer will handle setting it up to
	 * something else if needed.
	 */
	if (priv->use_ncsi) {
		priv->cur_duplex = DUPLEX_FULL;
		priv->cur_speed = SPEED_100;
	} else {
		priv->cur_duplex = 0;
		priv->cur_speed = 0;
	}

	/* Reset the hardware */
	err = ftgmac100_reset_and_config_mac(priv);
	if (err)
		goto err_hw;

	/* Initialize NAPI */
	netif_napi_add(netdev, &priv->napi, ftgmac100_poll);

	/* Grab our interrupt */
	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
		goto err_irq;
	}

	/* Start things up */
	err = ftgmac100_init_all(priv, false);
	if (err) {
		netdev_err(netdev, "Failed to allocate packet buffers\n");
		goto err_alloc;
	}

	if (netdev->phydev) {
		/* If we have a PHY, start polling */
		phy_start(netdev->phydev);
	}
	if (priv->use_ncsi) {
		/* If using NC-SI, set our carrier on and start the stack */
		netif_carrier_on(netdev);

		/* Start the NCSI device */
		err = ncsi_start_dev(priv->ndev);
		if (err)
			goto err_ncsi;
	}

	return 0;

err_ncsi:
	phy_stop(netdev->phydev);
	napi_disable(&priv->napi);
	netif_stop_queue(netdev);
err_alloc:
	ftgmac100_free_buffers(priv);
	free_irq(netdev->irq, netdev);
err_irq:
	netif_napi_del(&priv->napi);
err_hw:
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	ftgmac100_free_rings(priv);
	return err;
}

/* ndo_stop: quiesce interrupts, NAPI, PHY/NC-SI, then tear down the
 * hardware and free every buffer and ring.
 */
static int ftgmac100_stop(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Note about the reset task: We are called with the rtnl lock
	 * held, so we are synchronized against the core of the reset
	 * task. We must not try to synchronously cancel it otherwise
	 * we can deadlock.
 But since it will test for netif_running()
	 * which has already been cleared by the net core, we don't
	 * have anything special to do.
	 */

	/* disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	if (netdev->phydev)
		phy_stop(netdev->phydev);
	if (priv->use_ncsi)
		ncsi_stop_dev(priv->ndev);

	ftgmac100_stop_hw(priv);
	free_irq(netdev->irq, netdev);
	ftgmac100_free_buffers(priv);
	ftgmac100_free_rings(priv);

	return 0;
}

/* ndo_tx_timeout: mask interrupts and defer recovery to the reset
 * task — the full reset cannot run in this (atomic) context.
 */
static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Disable all interrupts */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

	/* Do the reset outside of interrupt context */
	schedule_work(&priv->reset_task);
}

/* ndo_set_features: only NETIF_F_HW_VLAN_CTAG_RX is handled at
 * runtime, by toggling the MAC's VLAN-tag-removal bit.
 */
static int ftgmac100_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	if (!netif_running(netdev))
		return 0;

	/* Update the vlan filtering bit */
	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		u32 maccr;

		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
		if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
			maccr |= FTGMAC100_MACCR_RM_VLAN;
		else
			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with local IRQs disabled */
static void ftgmac100_poll_controller(struct net_device *netdev)
{
	unsigned long flags;

	local_irq_save(flags);
	ftgmac100_interrupt(netdev->irq, netdev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops ftgmac100_netdev_ops = {
	.ndo_open		= ftgmac100_open,
	.ndo_stop		= ftgmac100_stop,
	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ftgmac100_tx_timeout,
	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
	.ndo_set_features	= ftgmac100_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ftgmac100_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
};

/* Allocate and register the MAC's built-in MDIO bus. On AST2400/2500
 * the IP must first be switched to the old MDIO register interface.
 */
static int ftgmac100_setup_mdio(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct platform_device *pdev = to_platform_device(priv->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *mdio_np;
	int err = 0;
	u32 reg;

	/* initialize mdio bus */
	priv->mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!priv->mii_bus)
		return -EIO;

	if (priv->mac_id == FTGMAC100_AST2400 ||
	    priv->mac_id == FTGMAC100_AST2500) {
		/* The AST2600 has a separate MDIO controller */

		/* For the AST2400 and AST2500 this driver only supports the
		 * old MDIO interface
		 */
		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
	}

	priv->mii_bus->name = "ftgmac100_mdio";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	priv->mii_bus->parent = priv->dev;
	priv->mii_bus->priv = priv->netdev;
	priv->mii_bus->read = ftgmac100_mdiobus_read;
	priv->mii_bus->write = ftgmac100_mdiobus_write;

	mdio_np = of_get_child_by_name(np, "mdio");

	err = devm_of_mdiobus_register(priv->dev, priv->mii_bus, mdio_np);
	of_node_put(mdio_np);
	if (err) {
		dev_err(priv->dev, "Cannot register MDIO bus!\n");
		return err;
	}

	return 0;
}

/* Detach from the PHY, undoing whichever attach path was used:
 * fixed-link (DT), NC-SI fixed phy, or a regular MDIO PHY.
 */
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
	struct ftgmac100 *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (!phydev)
		return;

	phy_disconnect(phydev);
	if (of_phy_is_fixed_link(priv->dev->of_node))
		of_phy_deregister_fixed_link(priv->dev->of_node);

	if (priv->use_ncsi)
		fixed_phy_unregister(phydev);
}

/* NC-SI state-change notification: just log link transitions */
static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
{
	if (unlikely(nd->state != ncsi_dev_state_functional))
		return;

	netdev_dbg(nd->dev, "NCSI interface %s\n",
		   nd->link_up ? "up" : "down");
}

/* Acquire and rate the MAC clock(s) on Aspeed SoCs */
static int ftgmac100_setup_clk(struct ftgmac100 *priv)
{
	struct clk *clk;
	int rc;

	clk = devm_clk_get_enabled(priv->dev, NULL /* MACCLK */);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	priv->clk = clk;

	/* Aspeed specifies a 100MHz clock is required for up to
	 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
	 * is sufficient
	 */
	rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
			  FTGMAC_100MHZ);
	if (rc)
		return rc;

	/* RCLK is for RMII, typically used for NCSI. Optional because it's not
	 * necessary if it's the AST2400 MAC, or the MAC is configured for
	 * RGMII, or the controller is not an ASPEED-based controller.
	 */
	priv->rclk = devm_clk_get_optional_enabled(priv->dev, "RCLK");
	if (IS_ERR(priv->rclk))
		return PTR_ERR(priv->rclk);

	return 0;
}

/* Does the device-tree node have a child of the given name? */
static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
{
	struct device_node *child_np = of_get_child_by_name(np, name);
	bool ret = false;

	if (child_np) {
		ret = true;
		of_node_put(child_np);
	}

	return ret;
}

/* Set the interface up for NC-SI operation: register with the NC-SI
 * stack and attach a 100/full fixed phy so phylib has link state.
 */
static int ftgmac100_probe_ncsi(struct net_device *netdev,
				struct ftgmac100 *priv,
				struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct phy_device *phydev;
	int err;

	if (!IS_ENABLED(CONFIG_NET_NCSI)) {
		dev_err(&pdev->dev, "NCSI stack not enabled\n");
		return -EINVAL;
	}

	dev_info(&pdev->dev, "Using NCSI interface\n");
	priv->use_ncsi = true;
	priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
	if (!priv->ndev)
		return -EINVAL;

	phydev = fixed_phy_register(&ncsi_phy_status, np);
	if (IS_ERR(phydev)) {
		dev_err(&pdev->dev, "failed to register fixed PHY device\n");
		err = PTR_ERR(phydev);
		goto err_register_ndev;
	}
	err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
				 PHY_INTERFACE_MODE_RMII);
	if (err) {
		dev_err(&pdev->dev, "Connecting PHY failed\n");
		goto err_register_phy;
	}

	return 0;
err_register_phy:
	fixed_phy_unregister(phydev);
err_register_ndev:
	if (priv->ndev)
		ncsi_unregister_dev(priv->ndev);
	priv->ndev = NULL;
	return err;
}

/* Select and wire up the link-management path from the device-tree:
 * NC-SI ("use-ncsi"), fixed-link/phy-handle, or a legacy automatic
 * scan of the embedded MDIO bus.
 */
static int ftgmac100_probe_dt(struct net_device *netdev,
			      struct platform_device *pdev,
			      struct ftgmac100 *priv,
			      struct device_node *np)
{
	struct phy_device *phy;
	int err;

	if (of_get_property(np, "use-ncsi", NULL))
		return ftgmac100_probe_ncsi(netdev, priv, pdev);

	if (of_phy_is_fixed_link(np) ||
	    of_get_property(np, "phy-handle", NULL)) {
		/* Support "mdio"/"phy" child nodes for ast2400/2500
		 * with an embedded MDIO controller. Automatically
		 * scan the DTS for available PHYs and register
		 * them. 2600 has an independent MDIO controller, not
		 * part of the MAC.
		 */
		phy = of_phy_get_and_connect(priv->netdev, np,
					     &ftgmac100_adjust_link);
		if (!phy) {
			dev_err(&pdev->dev, "Failed to connect to phy\n");
			return -EINVAL;
		}

		/* Indicate that we support PAUSE frames (see comment in
		 * Documentation/networking/phy.rst)
		 */
		phy_support_asym_pause(phy);

		/* Display what we found */
		phy_attached_info(phy);
		return 0;
	}

	if (!ftgmac100_has_child_node(np, "mdio")) {
		/* Support legacy ASPEED devicetree descriptions that
		 * describe a MAC with an embedded MDIO controller but
		 * have no "mdio" child node. Automatically scan the
		 * MDIO bus for available PHYs.
		 */
		err = ftgmac100_mii_probe(netdev);
		if (err) {
			dev_err(priv->dev, "MII probe failed!\n");
			return err;
		}
	}

	return 0;
}

/* Platform probe: identify the MAC variant, map registers, set up
 * MDIO/PHY or NC-SI, clocks and resets, choose the feature set and
 * register the net_device.
 */
static int ftgmac100_probe(struct platform_device *pdev)
{
	const struct ftgmac100_match_data *match_data;
	enum ftgmac100_mac_id mac_id;
	struct resource *res;
	int irq;
	struct net_device *netdev;
	struct ftgmac100 *priv;
	struct device_node *np;
	int err = 0;

	np = pdev->dev.of_node;
	if (np) {
		match_data = of_device_get_match_data(&pdev->dev);
		if (!match_data)
			return -EINVAL;
		mac_id = match_data->mac_id;
	} else {
		/* No DT: assume the generic Faraday IP */
		mac_id = FTGMAC100_FARADAY;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* setup net_device */
	netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
	netdev->netdev_ops = &ftgmac100_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	platform_set_drvdata(pdev, netdev);

	/* setup private data */
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->dev = &pdev->dev;
	priv->mac_id = mac_id;
	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);

	/* map io memory */
	priv->res = devm_request_mem_region(&pdev->dev,
					    res->start, resource_size(res),
					    dev_name(&pdev->dev));
	if (!priv->res) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		return -ENOMEM;
	}

	priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!priv->base) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		return -EIO;
	}

	netdev->irq = irq;

	/* Enable pause */
	priv->tx_pause = true;
	priv->rx_pause = true;
	priv->aneg_pause = true;

	/* MAC address from chip or random one */
	err = ftgmac100_initial_mac(priv);
	if (err)
		return err;

	/* Aspeed variants put the end-of-ring bit at a different
	 * position in the descriptor word than the Faraday IP.
	 */
	if (priv->mac_id == FTGMAC100_AST2400 ||
	    priv->mac_id == FTGMAC100_AST2500 ||
	    priv->mac_id == FTGMAC100_AST2600) {
		priv->rxdes0_edorr_mask = BIT(30);
		priv->txdes0_edotr_mask = BIT(30);
		priv->is_aspeed = true;
	} else {
		priv->rxdes0_edorr_mask = BIT(15);
		priv->txdes0_edotr_mask = BIT(15);
	}

	/* Only these variants have the MDIO controller embedded in the MAC */
	if (priv->mac_id == FTGMAC100_FARADAY ||
	    priv->mac_id == FTGMAC100_AST2400 ||
	    priv->mac_id == FTGMAC100_AST2500) {
		err = ftgmac100_setup_mdio(netdev);
		if (err)
			return err;
	}

	if (np) {
		err = ftgmac100_probe_dt(netdev, pdev, priv, np);
		if (err)
			goto err;
	}

	priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
	if (IS_ERR(priv->rst)) {
		err = PTR_ERR(priv->rst);
		goto err;
	}

	if (priv->is_aspeed) {
		err = ftgmac100_setup_clk(priv);
		if (err)
			goto err;
	}

	/* Disable ast2600 problematic HW arbitration */
	if (priv->mac_id == FTGMAC100_AST2600)
		iowrite32(FTGMAC100_TM_DEFAULT,
			  priv->base + FTGMAC100_OFFSET_TM);

	/* Default ring sizes */
	priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
	priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;

	/* Base feature set */
	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_TX;

	if (priv->use_ncsi)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* AST2400 doesn't have working HW checksum generation */
	if (priv->mac_id == FTGMAC100_AST2400)
		netdev->hw_features &= ~NETIF_F_HW_CSUM;

	/* AST2600 tx checksum with NCSI is broken */
	if (priv->use_ncsi && priv->mac_id == FTGMAC100_AST2600)
		netdev->hw_features &= ~NETIF_F_HW_CSUM;

	if (np && of_get_property(np, "no-hw-checksum", NULL))
		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
	netdev->features |= netdev->hw_features;

	/* register network device */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err;
	}

	netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);

	return 0;

err:
	ftgmac100_phy_disconnect(netdev);
	if (priv->ndev)
		ncsi_unregister_dev(priv->ndev);
	return err;
}

/* Platform remove: tear down in the reverse order of probe. The
 * netdev is unregistered before the reset work is cancelled, and the
 * PHY is disconnected last.
 */
static void ftgmac100_remove(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct ftgmac100 *priv;

	netdev = platform_get_drvdata(pdev);
	priv = netdev_priv(netdev);

	if (priv->ndev)
		ncsi_unregister_dev(priv->ndev);
	unregister_netdev(netdev);

	/* There's a small chance the reset task will have been re-queued,
	 * during stop, make sure it's gone before we free the structure.
	 */
	cancel_work_sync(&priv->reset_task);

	ftgmac100_phy_disconnect(netdev);
}

static const struct ftgmac100_match_data ftgmac100_match_data_ast2400 = {
	.mac_id = FTGMAC100_AST2400
};

static const struct ftgmac100_match_data ftgmac100_match_data_ast2500 = {
	.mac_id = FTGMAC100_AST2500
};

static const struct ftgmac100_match_data ftgmac100_match_data_ast2600 = {
	.mac_id = FTGMAC100_AST2600
};

static const struct ftgmac100_match_data ftgmac100_match_data_faraday = {
	.mac_id = FTGMAC100_FARADAY
};

static const struct of_device_id ftgmac100_of_match[] = {
	{ .compatible = "aspeed,ast2400-mac",
	  .data = &ftgmac100_match_data_ast2400},
	{ .compatible = "aspeed,ast2500-mac",
	  .data = &ftgmac100_match_data_ast2500 },
	{ .compatible = "aspeed,ast2600-mac",
	  .data = &ftgmac100_match_data_ast2600 },
	{ .compatible = "faraday,ftgmac100",
	  .data = &ftgmac100_match_data_faraday },
	{ }
};
MODULE_DEVICE_TABLE(of, ftgmac100_of_match);

static struct platform_driver ftgmac100_driver = {
	.probe	= ftgmac100_probe,
	.remove	= ftgmac100_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= ftgmac100_of_match,
	},
};
module_platform_driver(ftgmac100_driver);

MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");