
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2004-2006 Atmel Corporation
10 #include <linux/clk-provider.h>
23 #include <linux/dma-mapping.h>
37 #include <linux/firmware/xlnx-zynqmp.h>
55 * (bp)->rx_ring_size)
61 * (bp)->tx_ring_size)
64 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
75 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
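
/*
 * Illustrative sketch (not part of the driver): how a MACB_MAX_TX_LEN-style
 * limit falls out of the descriptor's frame-length field.  The 11-bit field
 * width and 8-byte alignment below are assumptions taken from the usual
 * macb.h values; check the header on your tree for the real constants.
 */
#include <stdio.h>

#define DEMO_TX_FRMLEN_SIZE	11	/* assumed width of the TX length field */
#define DEMO_TX_LEN_ALIGN	8	/* assumed TX buffer length alignment */

#define DEMO_MAX_TX_LEN \
	((unsigned int)((1 << DEMO_TX_FRMLEN_SIZE) - 1) & \
	 ~((unsigned int)(DEMO_TX_LEN_ALIGN - 1)))

int main(void)
{
	/* 0x7ff rounded down to an 8-byte boundary -> 2040 bytes */
	printf("max tx buffer length = %u\n", DEMO_MAX_TX_LEN);
	return 0;
}
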
91 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
130 switch (bp->hw_dma_cap) {
155 switch (bp->hw_dma_cap) {
181 return index & (bp->tx_ring_size - 1);
187 index = macb_tx_ring_wrap(queue->bp, index);
188 index = macb_adj_dma_desc_idx(queue->bp, index);
189 return &queue->tx_ring[index];
195 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
202 offset = macb_tx_ring_wrap(queue->bp, index) *
203 macb_dma_desc_get_size(queue->bp);
205 return queue->tx_ring_dma + offset;
210 return index & (bp->rx_ring_size - 1);
215 index = macb_rx_ring_wrap(queue->bp, index);
216 index = macb_adj_dma_desc_idx(queue->bp, index);
217 return &queue->rx_ring[index];
222 return queue->rx_buffers + queue->bp->rx_buffer_size *
223 macb_rx_ring_wrap(queue->bp, index);
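
/*
 * Illustrative sketch of the ring-wrap helpers above: because the ring
 * sizes are powers of two, "index & (ring_size - 1)" folds a free-running
 * producer/consumer index back into [0, ring_size) without a modulo.
 * Standalone demo, not driver code; 512 is only an example ring size.
 */
#include <stdio.h>

static unsigned int ring_wrap(unsigned int index, unsigned int ring_size)
{
	/* ring_size must be a power of two for the mask to equal index % size */
	return index & (ring_size - 1);
}

int main(void)
{
	unsigned int ring_size = 512;

	printf("%u -> %u\n", 511u, ring_wrap(511, ring_size));
	printf("%u -> %u\n", 512u, ring_wrap(512, ring_size));		/* wraps to 0 */
	printf("%u -> %u\n", 1030u, ring_wrap(1030, ring_size));	/* wraps to 6 */
	return 0;
}
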
229 return __raw_readl(bp->regs + offset);
234 __raw_writel(value, bp->regs + offset);
239 return readl_relaxed(bp->regs + offset);
244 writel_relaxed(value, bp->regs + offset);
281 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
283 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
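
/*
 * Illustrative sketch of the station-address packing above: the first four
 * bytes of the MAC address form the "bottom" register value (first address
 * byte in the least significant position) and the last two bytes form the
 * "top" register value.  Standalone, endian-independent demo of the layout.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t bottom = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
			  ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
	uint16_t top = (uint16_t)(mac[4] | ((uint16_t)mac[5] << 8));

	/* e.g. 02:11:22:33:44:55 -> bottom 0x33221102, top 0x5544 */
	printf("bottom = 0x%08x, top = 0x%04x\n", bottom, (unsigned int)top);
	return 0;
}
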
320 eth_hw_addr_set(bp->dev, addr);
325 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
326 eth_hw_addr_random(bp->dev);
339 struct macb *bp = bus->priv;
342 status = pm_runtime_resume_and_get(&bp->pdev->dev);
363 pm_runtime_mark_last_busy(&bp->pdev->dev);
364 pm_runtime_put_autosuspend(&bp->pdev->dev);
372 struct macb *bp = bus->priv;
375 status = pm_runtime_get_sync(&bp->pdev->dev);
377 pm_runtime_put_noidle(&bp->pdev->dev);
409 pm_runtime_mark_last_busy(&bp->pdev->dev);
410 pm_runtime_put_autosuspend(&bp->pdev->dev);
418 struct macb *bp = bus->priv;
421 status = pm_runtime_resume_and_get(&bp->pdev->dev);
441 pm_runtime_mark_last_busy(&bp->pdev->dev);
442 pm_runtime_put_autosuspend(&bp->pdev->dev);
451 struct macb *bp = bus->priv;
454 status = pm_runtime_get_sync(&bp->pdev->dev);
456 pm_runtime_put_noidle(&bp->pdev->dev);
487 pm_runtime_mark_last_busy(&bp->pdev->dev);
488 pm_runtime_put_autosuspend(&bp->pdev->dev);
498 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
499 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
501 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
503 upper_32_bits(queue->rx_ring_dma));
505 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
507 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
509 upper_32_bits(queue->tx_ring_dma));
515 * macb_set_tx_clk() - Set a clock to a new frequency
523 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
527 if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
534 rate_rounded = clk_round_rate(bp->tx_clk, rate);
541 ferr = abs(rate_rounded - rate);
544 netdev_warn(bp->dev,
548 if (clk_set_rate(bp->tx_clk, rate_rounded))
549 netdev_err(bp->dev, "adjusting tx_clk failed.\n");
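
/*
 * Illustrative sketch of the tx_clk handling in macb_set_tx_clk(): pick the
 * nominal rate from the link speed (2.5/25/125 MHz for 10/100/1000 Mbit/s),
 * then compare it with what the clock framework can actually round to.  The
 * "warn above roughly 0.005% error" threshold is restated here as an
 * assumption; the driver warns but still programs the rounded rate.
 */
#include <stdio.h>
#include <stdlib.h>

static long nominal_tx_rate(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:	return 2500000;
	case 100:	return 25000000;
	case 1000:	return 125000000;
	default:	return -1;
	}
}

static int rate_within_tolerance(long rate, long rate_rounded)
{
	long ferr = labs(rate_rounded - rate);

	/* error in units of rate/100000, i.e. thousandths of a percent */
	ferr = (ferr + rate / 100000 - 1) / (rate / 100000);
	return ferr <= 5;
}

int main(void)
{
	long rate = nominal_tx_rate(1000);
	long rounded = 124999000;	/* stand-in for a clk_round_rate() result */

	printf("target %ld Hz, rounded %ld Hz, within tolerance: %d\n",
	       rate, rounded, rate_within_tolerance(rate, rounded));
	return 0;
}
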
574 state->speed = SPEED_10000;
575 state->duplex = 1;
576 state->an_complete = 1;
579 state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
582 state->pause = MLO_PAUSE_RX;
602 state->link = 0;
634 struct net_device *ndev = to_net_dev(config->dev);
640 spin_lock_irqsave(&bp->lock, flags);
645 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
646 if (state->interface == PHY_INTERFACE_MODE_RMII)
652 if (state->interface == PHY_INTERFACE_MODE_SGMII) {
654 } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
657 } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
658 bp->phy_interface == PHY_INTERFACE_MODE_MII) {
674 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
686 spin_unlock_irqrestore(&bp->lock, flags);
692 struct net_device *ndev = to_net_dev(config->dev);
698 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
699 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
701 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
716 struct net_device *ndev = to_net_dev(config->dev);
723 spin_lock_irqsave(&bp->lock, flags);
735 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
750 bp->macbgem_ops.mog_init_rings(bp);
753 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
755 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
760 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
764 spin_unlock_irqrestore(&bp->lock, flags);
766 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
782 struct net_device *ndev = to_net_dev(config->dev);
786 return &bp->phylink_usx_pcs;
788 return &bp->phylink_sgmii_pcs;
802 dn = of_parse_phandle(dn, "phy-handle", 0);
809 struct device_node *dn = bp->pdev->dev.of_node;
810 struct net_device *dev = bp->dev;
815 ret = phylink_of_phy_connect(bp->phylink, dn, 0);
818 phydev = phy_find_first(bp->mii_bus);
821 return -ENXIO;
825 ret = phylink_connect_phy(bp->phylink, phydev);
833 phylink_start(bp->phylink);
841 struct net_device *ndev = to_net_dev(config->dev);
844 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
852 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
853 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
855 bp->phylink_config.dev = &dev->dev;
856 bp->phylink_config.type = PHYLINK_NETDEV;
857 bp->phylink_config.mac_managed_pm = true;
859 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
860 bp->phylink_config.poll_fixed_state = true;
861 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
864 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
868 bp->phylink_config.supported_interfaces);
870 bp->phylink_config.supported_interfaces);
873 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
874 bp->phylink_config.mac_capabilities |= MAC_1000FD;
875 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
876 bp->phylink_config.mac_capabilities |= MAC_1000HD;
879 bp->phylink_config.supported_interfaces);
880 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
882 if (bp->caps & MACB_CAPS_PCS)
884 bp->phylink_config.supported_interfaces);
886 if (bp->caps & MACB_CAPS_HIGH_SPEED) {
888 bp->phylink_config.supported_interfaces);
889 bp->phylink_config.mac_capabilities |= MAC_10000FD;
893 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
894 bp->phy_interface, &macb_phylink_ops);
895 if (IS_ERR(bp->phylink)) {
897 PTR_ERR(bp->phylink));
898 return PTR_ERR(bp->phylink);
906 struct device_node *child, *np = bp->pdev->dev.of_node;
912 return of_mdiobus_register(bp->mii_bus, mdio_np);
926 return of_mdiobus_register(bp->mii_bus, np);
929 return mdiobus_register(bp->mii_bus);
934 struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
935 int err = -ENXIO;
937 /* With fixed-link, we don't need to register the MDIO bus,
943 return macb_mii_probe(bp->dev);
948 bp->mii_bus = mdiobus_alloc();
949 if (!bp->mii_bus) {
950 err = -ENOMEM;
954 bp->mii_bus->name = "MACB_mii_bus";
955 bp->mii_bus->read = &macb_mdio_read_c22;
956 bp->mii_bus->write = &macb_mdio_write_c22;
957 bp->mii_bus->read_c45 = &macb_mdio_read_c45;
958 bp->mii_bus->write_c45 = &macb_mdio_write_c45;
959 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
960 bp->pdev->name, bp->pdev->id);
961 bp->mii_bus->priv = bp;
962 bp->mii_bus->parent = &bp->pdev->dev;
964 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
970 err = macb_mii_probe(bp->dev);
977 mdiobus_unregister(bp->mii_bus);
979 mdiobus_free(bp->mii_bus);
988 u64 *p = &bp->hw_stats.macb.rx_pause_frames;
989 u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
992 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
995 *p += bp->macb_reg_readl(bp, offset);
1013 if (tx_skb->mapping) {
1014 if (tx_skb->mapped_as_page)
1015 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
1016 tx_skb->size, DMA_TO_DEVICE);
1018 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
1019 tx_skb->size, DMA_TO_DEVICE);
1020 tx_skb->mapping = 0;
1023 if (tx_skb->skb) {
1024 napi_consume_skb(tx_skb->skb, budget);
1025 tx_skb->skb = NULL;
1034 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1036 desc_64->addrh = upper_32_bits(addr);
1044 desc->addr = lower_32_bits(addr);
1053 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1055 addr = ((u64)(desc_64->addrh) << 32);
1058 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1060 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
1071 struct macb *bp = queue->bp;
1081 queue_index = queue - bp->queues;
1082 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1083 queue_index, queue->tx_tail, queue->tx_head);
1091 napi_disable(&queue->napi_tx);
1092 spin_lock_irqsave(&bp->lock, flags);
1095 netif_tx_stop_all_queues(bp->dev);
1102 netdev_err(bp->dev, "BUG: halt tx timed out\n");
1110 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1114 ctrl = desc->ctrl;
1116 skb = tx_skb->skb;
1124 skb = tx_skb->skb;
1131 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1133 skb->data);
1134 bp->dev->stats.tx_packets++;
1135 queue->stats.tx_packets++;
1137 bp->dev->stats.tx_bytes += skb->len;
1138 queue->stats.tx_bytes += skb->len;
1139 bytes += skb->len;
1142 /* "Buffers exhausted mid-frame" errors may only happen
1147 netdev_err(bp->dev,
1148 "BUG: TX buffers exhausted mid-frame\n");
1150 desc->ctrl = ctrl | MACB_BIT(TX_USED);
1156 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1162 desc->ctrl = MACB_BIT(TX_USED);
1168 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1170 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1171 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1174 queue->tx_head = 0;
1175 queue->tx_tail = 0;
1185 netif_tx_start_all_queues(bp->dev);
1188 spin_unlock_irqrestore(&bp->lock, flags);
1189 napi_enable(&queue->napi_tx);
1199 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1211 if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
1224 struct macb *bp = queue->bp;
1225 u16 queue_index = queue - bp->queues;
1231 spin_lock(&queue->tx_ptr_lock);
1232 head = queue->tx_head;
1233 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1244 ctrl = desc->ctrl;
1255 skb = tx_skb->skb;
1259 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1263 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1265 skb->data);
1266 bp->dev->stats.tx_packets++;
1267 queue->stats.tx_packets++;
1268 bp->dev->stats.tx_bytes += skb->len;
1269 queue->stats.tx_bytes += skb->len;
1271 bytes += skb->len;
1286 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1289 queue->tx_tail = tail;
1290 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1291 CIRC_CNT(queue->tx_head, queue->tx_tail,
1292 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1293 netif_wake_subqueue(bp->dev, queue_index);
1294 spin_unlock(&queue->tx_ptr_lock);
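
/*
 * Illustrative sketch of the wake-up test above: the stopped subqueue is
 * restarted once the number of descriptors still in flight, CIRC_CNT(head,
 * tail, ring_size), drops to MACB_TX_WAKEUP_THRESH (three quarters of the
 * ring, per the macro near the top of the file).  Standalone demo with a
 * local CIRC_CNT; ring_size must be a power of two.
 */
#include <stdio.h>

#define DEMO_CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

static int should_wake_queue(unsigned int head, unsigned int tail,
			     unsigned int ring_size)
{
	unsigned int wakeup_thresh = 3 * ring_size / 4;

	return DEMO_CIRC_CNT(head, tail, ring_size) <= wakeup_thresh;
}

int main(void)
{
	unsigned int ring_size = 512;	/* example ring size */

	/* 500 descriptors outstanding: keep the queue stopped */
	printf("wake = %d\n", should_wake_queue(1000, 500, ring_size));
	/* 200 descriptors outstanding: wake the subqueue */
	printf("wake = %d\n", should_wake_queue(700, 500, ring_size));
	return 0;
}
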
1304 struct macb *bp = queue->bp;
1307 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1308 bp->rx_ring_size) > 0) {
1309 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1316 if (!queue->rx_skbuff[entry]) {
1318 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1320 netdev_err(bp->dev,
1326 paddr = dma_map_single(&bp->pdev->dev, skb->data,
1327 bp->rx_buffer_size,
1329 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1334 queue->rx_skbuff[entry] = skb;
1336 if (entry == bp->rx_ring_size - 1)
1338 desc->ctrl = 0;
1348 desc->ctrl = 0;
1350 desc->addr &= ~MACB_BIT(RX_USED);
1352 queue->rx_prepared_head++;
1358 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1359 queue, queue->rx_prepared_head, queue->rx_tail);
1371 desc->addr &= ~MACB_BIT(RX_USED);
1386 struct macb *bp = queue->bp;
1398 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1404 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1410 /* Ensure ctrl is at least as up-to-date as rxused */
1413 ctrl = desc->ctrl;
1415 queue->rx_tail++;
1419 netdev_err(bp->dev,
1421 bp->dev->stats.rx_dropped++;
1422 queue->stats.rx_dropped++;
1425 skb = queue->rx_skbuff[entry];
1427 netdev_err(bp->dev,
1429 bp->dev->stats.rx_dropped++;
1430 queue->stats.rx_dropped++;
1434 queue->rx_skbuff[entry] = NULL;
1435 len = ctrl & bp->rx_frm_len_mask;
1437 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1440 dma_unmap_single(&bp->pdev->dev, addr,
1441 bp->rx_buffer_size, DMA_FROM_DEVICE);
1443 skb->protocol = eth_type_trans(skb, bp->dev);
1445 if (bp->dev->features & NETIF_F_RXCSUM &&
1446 !(bp->dev->flags & IFF_PROMISC) &&
1448 skb->ip_summed = CHECKSUM_UNNECESSARY;
1450 bp->dev->stats.rx_packets++;
1451 queue->stats.rx_packets++;
1452 bp->dev->stats.rx_bytes += skb->len;
1453 queue->stats.rx_bytes += skb->len;
1458 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1459 skb->len, skb->csum);
1463 skb->data, 32, true);
1482 struct macb *bp = queue->bp;
1485 len = desc->ctrl & bp->rx_frm_len_mask;
1487 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1493 * payload word-aligned.
1499 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1501 bp->dev->stats.rx_dropped++;
1504 desc->addr &= ~MACB_BIT(RX_USED);
1521 unsigned int frag_len = bp->rx_buffer_size;
1526 return -1;
1528 frag_len = len - offset;
1533 offset += bp->rx_buffer_size;
1535 desc->addr &= ~MACB_BIT(RX_USED);
1545 skb->protocol = eth_type_trans(skb, bp->dev);
1547 bp->dev->stats.rx_packets++;
1548 bp->dev->stats.rx_bytes += skb->len;
1549 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1550 skb->len, skb->csum);
1558 struct macb *bp = queue->bp;
1563 addr = queue->rx_buffers_dma;
1564 for (i = 0; i < bp->rx_ring_size; i++) {
1567 desc->ctrl = 0;
1568 addr += bp->rx_buffer_size;
1570 desc->addr |= MACB_BIT(RX_WRAP);
1571 queue->rx_tail = 0;
1577 struct macb *bp = queue->bp;
1581 int first_frag = -1;
1583 for (tail = queue->rx_tail; budget > 0; tail++) {
1590 if (!(desc->addr & MACB_BIT(RX_USED)))
1593 /* Ensure ctrl is at least as up-to-date as addr */
1596 ctrl = desc->ctrl;
1599 if (first_frag != -1)
1607 if (unlikely(first_frag == -1)) {
1613 first_frag = -1;
1620 budget--;
1629 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1631 spin_lock_irqsave(&bp->lock, flags);
1637 queue_writel(queue, RBQP, queue->rx_ring_dma);
1641 spin_unlock_irqrestore(&bp->lock, flags);
1645 if (first_frag != -1)
1646 queue->rx_tail = first_frag;
1648 queue->rx_tail = tail;
1655 struct macb *bp = queue->bp;
1659 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1665 return (desc->addr & MACB_BIT(RX_USED)) != 0;
1671 struct macb *bp = queue->bp;
1674 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1676 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1677 (unsigned int)(queue - bp->queues), work_done, budget);
1680 queue_writel(queue, IER, bp->rx_intr_mask);
1686 * interrupts are re-enabled.
1693 queue_writel(queue, IDR, bp->rx_intr_mask);
1694 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1696 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1708 struct macb *bp = queue->bp;
1711 spin_lock(&queue->tx_ptr_lock);
1713 if (queue->tx_head == queue->tx_tail)
1718 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1723 spin_lock_irq(&bp->lock);
1725 spin_unlock_irq(&bp->lock);
1728 spin_unlock(&queue->tx_ptr_lock);
1735 spin_lock(&queue->tx_ptr_lock);
1736 if (queue->tx_head != queue->tx_tail) {
1740 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1743 spin_unlock(&queue->tx_ptr_lock);
1750 struct macb *bp = queue->bp;
1756 if (queue->txubr_pending) {
1757 queue->txubr_pending = false;
1758 netdev_vdbg(bp->dev, "poll: tx restart\n");
1762 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1763 (unsigned int)(queue - bp->queues), work_done, budget);
1772 * interrupts are re-enabled.
1780 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1782 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1793 struct net_device *dev = bp->dev;
1798 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1799 queue_writel(queue, IDR, bp->rx_intr_mask |
1810 bp->macbgem_ops.mog_init_rings(bp);
1816 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1818 bp->rx_intr_mask |
1832 struct macb *bp = queue->bp;
1840 spin_lock(&bp->lock);
1845 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1846 (unsigned int)(queue - bp->queues),
1848 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1850 pm_wakeup_event(&bp->pdev->dev, 0);
1853 spin_unlock(&bp->lock);
1861 struct macb *bp = queue->bp;
1869 spin_lock(&bp->lock);
1874 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1875 (unsigned int)(queue - bp->queues),
1877 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1879 pm_wakeup_event(&bp->pdev->dev, 0);
1882 spin_unlock(&bp->lock);
1890 struct macb *bp = queue->bp;
1891 struct net_device *dev = bp->dev;
1899 spin_lock(&bp->lock);
1904 queue_writel(queue, IDR, -1);
1905 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1906 queue_writel(queue, ISR, -1);
1910 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1911 (unsigned int)(queue - bp->queues),
1914 if (status & bp->rx_intr_mask) {
1921 queue_writel(queue, IDR, bp->rx_intr_mask);
1922 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1925 if (napi_schedule_prep(&queue->napi_rx)) {
1926 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1927 __napi_schedule(&queue->napi_rx);
1934 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1939 queue->txubr_pending = true;
1943 if (napi_schedule_prep(&queue->napi_tx)) {
1944 netdev_vdbg(bp->dev, "scheduling TX softirq\n");
1945 __napi_schedule(&queue->napi_tx);
1951 schedule_work(&queue->tx_error_task);
1953 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1960 * add that if/when we get our hands on a full-blown MII PHY.
1965 * interrupts but it can be cleared by re-enabling RX. See
1976 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1982 spin_lock(&bp->stats_lock);
1984 bp->hw_stats.gem.rx_overruns++;
1986 bp->hw_stats.macb.rx_overruns++;
1987 spin_unlock(&bp->stats_lock);
1989 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1994 queue_work(system_bh_wq, &bp->hresp_err_bh_work);
1997 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2003 spin_unlock(&bp->lock);
2009 /* Polling receive - used by netconsole and other diagnostic tools
2020 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2021 macb_interrupt(dev->irq, queue);
2032 unsigned int len, entry, i, tx_head = queue->tx_head;
2036 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
2041 if (skb_shinfo(skb)->gso_size != 0) {
2042 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2043 /* UDP - UFO */
2046 /* TCP - TSO */
2050 /* First, map non-paged data */
2059 tx_skb = &queue->tx_skb[entry];
2061 mapping = dma_map_single(&bp->pdev->dev,
2062 skb->data + offset,
2064 if (dma_mapping_error(&bp->pdev->dev, mapping))
2068 tx_skb->skb = NULL;
2069 tx_skb->mapping = mapping;
2070 tx_skb->size = size;
2071 tx_skb->mapped_as_page = false;
2073 len -= size;
2078 size = min(len, bp->max_tx_length);
2083 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2088 size = min(len, bp->max_tx_length);
2090 tx_skb = &queue->tx_skb[entry];
2092 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2094 if (dma_mapping_error(&bp->pdev->dev, mapping))
2098 tx_skb->skb = NULL;
2099 tx_skb->mapping = mapping;
2100 tx_skb->size = size;
2101 tx_skb->mapped_as_page = true;
2103 len -= size;
2112 netdev_err(bp->dev, "BUG! empty skb!\n");
2117 tx_skb->skb = skb;
2130 desc->ctrl = ctrl;
2135 mss_mfs = skb_shinfo(skb)->gso_size +
2139 mss_mfs = skb_shinfo(skb)->gso_size;
2148 i--;
2150 tx_skb = &queue->tx_skb[entry];
2153 ctrl = (u32)tx_skb->size;
2158 if (unlikely(entry == (bp->tx_ring_size - 1)))
2162 if (i == queue->tx_head) {
2165 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2166 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
2176 macb_set_addr(bp, desc, tx_skb->mapping);
2177 /* desc->addr must be visible to hardware before clearing
2178 * 'TX_USED' bit in desc->ctrl.
2181 desc->ctrl = ctrl;
2182 } while (i != queue->tx_head);
2184 queue->tx_head = tx_head;
2189 netdev_err(bp->dev, "TX DMA map failed\n");
2191 for (i = queue->tx_head; i != tx_head; i++) {
2210 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2220 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2223 nr_frags = skb_shinfo(skb)->nr_frags;
2225 nr_frags--;
2227 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2238 if (skb->ip_summed != CHECKSUM_PARTIAL)
2243 return -1;
2246 * This is required - at least for Zynq, which otherwise calculates
2249 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2257 int padlen = ETH_ZLEN - (*skb)->len;
2262 if (!(ndev->features & NETIF_F_HW_CSUM) ||
2263 !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
2264 skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
2282 return -ENOMEM;
2289 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
2293 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
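
/*
 * Illustrative sketch of the pad-and-FCS arithmetic above: the frame is
 * zero-padded so that data plus FCS reaches the Ethernet minimum, and the
 * FCS is crc32_le(~0, data, len) inverted and appended least significant
 * byte first.  Standalone demo using a bitwise CRC-32 (reflected polynomial
 * 0xEDB88320); the 60/4 byte constants correspond to ETH_ZLEN and
 * ETH_FCS_LEN.  Why the driver takes this software path at all is governed
 * by the surrounding conditions (HW checksum offload, no GSO, no one-step
 * PTP), not shown here.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t demo_crc32_le(uint32_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1)));
	}
	return crc;
}

int main(void)
{
	uint8_t frame[64 + 4] = { 0 };	/* room for ETH_ZLEN + FCS */
	size_t len = 42;		/* short frame: needs padding */
	size_t padlen = len < 60 ? 60 - len : 0;
	uint32_t fcs;

	memset(frame, 0xab, len);	/* fake header + payload */
	memset(frame + len, 0, padlen);	/* zero padding up to ETH_ZLEN */
	len += padlen;

	fcs = ~demo_crc32_le(~0u, frame, len);
	frame[len++] = fcs & 0xff;	/* FCS goes out LSB first */
	frame[len++] = (fcs >> 8) & 0xff;
	frame[len++] = (fcs >> 16) & 0xff;
	frame[len++] = (fcs >> 24) & 0xff;

	printf("final frame length %zu bytes, FCS 0x%08x\n", len, fcs);
	return 0;
}
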
2308 struct macb_queue *queue = &bp->queues[queue_index];
2325 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2326 (bp->hw_dma_cap & HW_DMA_CAP_PTP))
2327 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2330 is_lso = (skb_shinfo(skb)->gso_size != 0);
2334 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2340 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2345 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
2348 netdev_vdbg(bp->dev,
2350 queue_index, skb->len, skb->head, skb->data,
2353 skb->data, 16, true);
2362 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2364 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2365 nr_frags = skb_shinfo(skb)->nr_frags;
2367 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2368 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
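
/*
 * Illustrative sketch of the descriptor accounting above: one descriptor
 * carries at most max_tx_length bytes, so the linear head and every page
 * fragment contribute DIV_ROUND_UP(size, max_tx_length) descriptors (the
 * extra "+ 1" LSO header descriptor from the driver is omitted here).
 * Standalone demo; 2040 is the non-GEM max_tx_length assumed earlier.
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int count_tx_descriptors(unsigned int headlen,
					 const unsigned int *frag_sizes,
					 unsigned int nr_frags,
					 unsigned int max_tx_length)
{
	unsigned int desc_cnt = DEMO_DIV_ROUND_UP(headlen, max_tx_length);
	unsigned int f;

	for (f = 0; f < nr_frags; f++)
		desc_cnt += DEMO_DIV_ROUND_UP(frag_sizes[f], max_tx_length);
	return desc_cnt;
}

int main(void)
{
	unsigned int frags[2] = { 4096, 1000 };

	/* 1514-byte linear area plus a 4 KiB and a 1000-byte fragment */
	printf("descriptors needed: %u\n",
	       count_tx_descriptors(1514, frags, 2, 2040));
	return 0;
}
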
2371 spin_lock_bh(&queue->tx_ptr_lock);
2374 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2375 bp->tx_ring_size) < desc_cnt) {
2377 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2378 queue->tx_head, queue->tx_tail);
2392 netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
2393 skb->len);
2395 spin_lock_irq(&bp->lock);
2397 spin_unlock_irq(&bp->lock);
2399 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2403 spin_unlock_bh(&queue->tx_ptr_lock);
2411 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2413 bp->rx_buffer_size = size;
2415 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2416 netdev_dbg(bp->dev,
2419 bp->rx_buffer_size =
2420 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2424 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2425 bp->dev->mtu, bp->rx_buffer_size);
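
/*
 * Illustrative sketch of the RX buffer sizing above: the GEM RX buffer size
 * must be a multiple of RX_BUFFER_MULTIPLE (64 bytes in the driver, restated
 * here as an assumption), so the MTU-derived size gets rounded up before it
 * is programmed.  Standalone demo.
 */
#include <stdio.h>

#define DEMO_RX_BUFFER_MULTIPLE	64

static unsigned int demo_rx_buffer_size(unsigned int frame_size)
{
	unsigned int size = frame_size;

	if (size % DEMO_RX_BUFFER_MULTIPLE)
		size = (size / DEMO_RX_BUFFER_MULTIPLE + 1) * DEMO_RX_BUFFER_MULTIPLE;
	return size;
}

int main(void)
{
	/* 1500-byte MTU + 14-byte header + 4-byte FCS + 2 bytes NET_IP_ALIGN */
	unsigned int frame = 1500 + 14 + 4 + 2;

	printf("frame %u -> rx buffer %u\n", frame, demo_rx_buffer_size(frame));
	return 0;
}
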
2437 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2438 if (!queue->rx_skbuff)
2441 for (i = 0; i < bp->rx_ring_size; i++) {
2442 skb = queue->rx_skbuff[i];
2450 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2456 kfree(queue->rx_skbuff);
2457 queue->rx_skbuff = NULL;
2463 struct macb_queue *queue = &bp->queues[0];
2465 if (queue->rx_buffers) {
2466 dma_free_coherent(&bp->pdev->dev,
2467 bp->rx_ring_size * bp->rx_buffer_size,
2468 queue->rx_buffers, queue->rx_buffers_dma);
2469 queue->rx_buffers = NULL;
2479 if (bp->rx_ring_tieoff) {
2480 dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
2481 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
2482 bp->rx_ring_tieoff = NULL;
2485 bp->macbgem_ops.mog_free_rx_buffers(bp);
2487 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2488 kfree(queue->tx_skb);
2489 queue->tx_skb = NULL;
2490 if (queue->tx_ring) {
2491 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2492 dma_free_coherent(&bp->pdev->dev, size,
2493 queue->tx_ring, queue->tx_ring_dma);
2494 queue->tx_ring = NULL;
2496 if (queue->rx_ring) {
2497 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2498 dma_free_coherent(&bp->pdev->dev, size,
2499 queue->rx_ring, queue->rx_ring_dma);
2500 queue->rx_ring = NULL;
2511 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2512 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2513 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2514 if (!queue->rx_skbuff)
2515 return -ENOMEM;
2517 netdev_dbg(bp->dev,
2519 bp->rx_ring_size, queue->rx_skbuff);
2526 struct macb_queue *queue = &bp->queues[0];
2529 size = bp->rx_ring_size * bp->rx_buffer_size;
2530 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2531 &queue->rx_buffers_dma, GFP_KERNEL);
2532 if (!queue->rx_buffers)
2533 return -ENOMEM;
2535 netdev_dbg(bp->dev,
2537 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2547 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2548 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2549 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2550 &queue->tx_ring_dma,
2552 if (!queue->tx_ring)
2554 netdev_dbg(bp->dev,
2556 q, size, (unsigned long)queue->tx_ring_dma,
2557 queue->tx_ring);
2559 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2560 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2561 if (!queue->tx_skb)
2564 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2565 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2566 &queue->rx_ring_dma, GFP_KERNEL);
2567 if (!queue->rx_ring)
2569 netdev_dbg(bp->dev,
2571 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2573 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2577 if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
2578 bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
2580 &bp->rx_ring_tieoff_dma,
2582 if (!bp->rx_ring_tieoff)
2590 return -ENOMEM;
2595 struct macb_dma_desc *desc = bp->rx_ring_tieoff;
2597 if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
2603 desc->ctrl = 0;
2613 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2614 for (i = 0; i < bp->tx_ring_size; i++) {
2617 desc->ctrl = MACB_BIT(TX_USED);
2619 desc->ctrl |= MACB_BIT(TX_WRAP);
2620 queue->tx_head = 0;
2621 queue->tx_tail = 0;
2623 queue->rx_tail = 0;
2624 queue->rx_prepared_head = 0;
2637 macb_init_rx_ring(&bp->queues[0]);
2639 for (i = 0; i < bp->tx_ring_size; i++) {
2640 desc = macb_tx_desc(&bp->queues[0], i);
2642 desc->ctrl = MACB_BIT(TX_USED);
2644 bp->queues[0].tx_head = 0;
2645 bp->queues[0].tx_tail = 0;
2646 desc->ctrl |= MACB_BIT(TX_WRAP);
2668 macb_writel(bp, TSR, -1);
2669 macb_writel(bp, RSR, -1);
2675 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2676 queue_writel(queue, IDR, -1);
2678 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2679 queue_writel(queue, ISR, -1);
2686 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2716 pclk_hz = clk_get_rate(bp->pclk);
2750 * - use the correct receive buffer size
2751 * - set best burst length for DMA operations
2753 * - set both rx/tx packet buffers to full memory size
2763 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2765 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2766 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2772 if (bp->dma_burst_length)
2773 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2774 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2777 if (bp->native_io)
2782 if (bp->dev->features & NETIF_F_HW_CSUM)
2789 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2793 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2796 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2812 if (bp->caps & MACB_CAPS_JUMBO)
2816 if (bp->dev->flags & IFF_PROMISC)
2818 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2820 if (!(bp->dev->flags & IFF_BROADCAST))
2824 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2825 gem_writel(bp, JML, bp->jumbo_max_len);
2826 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2827 if (bp->caps & MACB_CAPS_JUMBO)
2828 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2833 if (bp->rx_watermark)
2834 gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
2893 /* Add multicast addresses to the internal multicast-hash table. */
2905 bitnr = hash_get_index(ha->addr);
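
/*
 * Illustrative sketch of the multicast hashing used above: hash_get_index()
 * folds the 48-bit destination address into a 6-bit index by XOR-ing every
 * sixth bit, and that index selects one bit of the 64-bit hash filter whose
 * halves are written to HRB (low word) and HRT (high word).  Standalone
 * reconstruction; verify against hash_get_index() in the driver itself.
 */
#include <stdio.h>
#include <stdint.h>

static int hash_bit_value(int bitnr, const uint8_t *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

static int demo_hash_get_index(const uint8_t *addr)
{
	int i, j, bitval, hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);
		hash_index |= bitval << j;
	}
	return hash_index;
}

int main(void)
{
	const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint64_t filter = 0;
	int bitnr = demo_hash_get_index(mc_addr);

	filter |= 1ULL << bitnr;
	printf("hash index %d -> HRB 0x%08x, HRT 0x%08x\n", bitnr,
	       (unsigned int)(filter & 0xffffffff),
	       (unsigned int)(filter >> 32));
	return 0;
}
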
2921 if (dev->flags & IFF_PROMISC) {
2933 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2937 if (dev->flags & IFF_ALLMULTI) {
2939 macb_or_gem_writel(bp, HRB, -1);
2940 macb_or_gem_writel(bp, HRT, -1);
2946 } else if (dev->flags & (~IFF_ALLMULTI)) {
2958 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2964 netdev_dbg(bp->dev, "open\n");
2966 err = pm_runtime_resume_and_get(&bp->pdev->dev);
2980 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2981 napi_enable(&queue->napi_rx);
2982 napi_enable(&queue->napi_tx);
2987 err = phy_power_on(bp->sgmii_phy);
2997 if (bp->ptp_info)
2998 bp->ptp_info->ptp_init(dev);
3003 phy_power_off(bp->sgmii_phy);
3007 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3008 napi_disable(&queue->napi_rx);
3009 napi_disable(&queue->napi_tx);
3013 pm_runtime_put_sync(&bp->pdev->dev);
3026 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3027 napi_disable(&queue->napi_rx);
3028 napi_disable(&queue->napi_tx);
3032 phylink_stop(bp->phylink);
3033 phylink_disconnect_phy(bp->phylink);
3035 phy_power_off(bp->sgmii_phy);
3037 spin_lock_irqsave(&bp->lock, flags);
3040 spin_unlock_irqrestore(&bp->lock, flags);
3044 if (bp->ptp_info)
3045 bp->ptp_info->ptp_remove(dev);
3047 pm_runtime_put(&bp->pdev->dev);
3055 return -EBUSY;
3057 WRITE_ONCE(dev->mtu, new_mtu);
3080 u64 *p = &bp->hw_stats.gem.tx_octets;
3084 u64 val = bp->macb_reg_readl(bp, offset);
3086 bp->ethtool_stats[i] += val;
3091 val = bp->macb_reg_readl(bp, offset + 4);
3092 bp->ethtool_stats[i] += ((u64)val) << 32;
3098 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3099 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
3100 bp->ethtool_stats[idx++] = *stat;
3105 struct gem_stats *hwstat = &bp->hw_stats.gem;
3107 spin_lock_irq(&bp->stats_lock);
3108 if (netif_running(bp->dev))
3111 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
3112 hwstat->rx_alignment_errors +
3113 hwstat->rx_resource_errors +
3114 hwstat->rx_overruns +
3115 hwstat->rx_oversize_frames +
3116 hwstat->rx_jabbers +
3117 hwstat->rx_undersized_frames +
3118 hwstat->rx_length_field_frame_errors);
3119 nstat->tx_errors = (hwstat->tx_late_collisions +
3120 hwstat->tx_excessive_collisions +
3121 hwstat->tx_underrun +
3122 hwstat->tx_carrier_sense_errors);
3123 nstat->multicast = hwstat->rx_multicast_frames;
3124 nstat->collisions = (hwstat->tx_single_collision_frames +
3125 hwstat->tx_multiple_collision_frames +
3126 hwstat->tx_excessive_collisions);
3127 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
3128 hwstat->rx_jabbers +
3129 hwstat->rx_undersized_frames +
3130 hwstat->rx_length_field_frame_errors);
3131 nstat->rx_over_errors = hwstat->rx_resource_errors;
3132 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
3133 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
3134 nstat->rx_fifo_errors = hwstat->rx_overruns;
3135 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
3136 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
3137 nstat->tx_fifo_errors = hwstat->tx_underrun;
3138 spin_unlock_irq(&bp->stats_lock);
3146 spin_lock_irq(&bp->stats_lock);
3148 memcpy(data, &bp->ethtool_stats, sizeof(u64)
3150 spin_unlock_irq(&bp->stats_lock);
3159 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3161 return -EOPNOTSUPP;
3179 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3194 struct macb_stats *hwstat = &bp->hw_stats.macb;
3196 netdev_stats_to_stats64(nstat, &bp->dev->stats);
3203 spin_lock_irq(&bp->stats_lock);
3207 nstat->rx_errors = (hwstat->rx_fcs_errors +
3208 hwstat->rx_align_errors +
3209 hwstat->rx_resource_errors +
3210 hwstat->rx_overruns +
3211 hwstat->rx_oversize_pkts +
3212 hwstat->rx_jabbers +
3213 hwstat->rx_undersize_pkts +
3214 hwstat->rx_length_mismatch);
3215 nstat->tx_errors = (hwstat->tx_late_cols +
3216 hwstat->tx_excessive_cols +
3217 hwstat->tx_underruns +
3218 hwstat->tx_carrier_errors +
3219 hwstat->sqe_test_errors);
3220 nstat->collisions = (hwstat->tx_single_cols +
3221 hwstat->tx_multiple_cols +
3222 hwstat->tx_excessive_cols);
3223 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
3224 hwstat->rx_jabbers +
3225 hwstat->rx_undersize_pkts +
3226 hwstat->rx_length_mismatch);
3227 nstat->rx_over_errors = hwstat->rx_resource_errors +
3228 hwstat->rx_overruns;
3229 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
3230 nstat->rx_frame_errors = hwstat->rx_align_errors;
3231 nstat->rx_fifo_errors = hwstat->rx_overruns;
3233 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
3234 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
3235 nstat->tx_fifo_errors = hwstat->tx_underruns;
3237 spin_unlock_irq(&bp->stats_lock);
3244 struct macb_stats *hwstat = &bp->hw_stats.macb;
3246 spin_lock_irq(&bp->stats_lock);
3248 pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3249 pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3250 spin_unlock_irq(&bp->stats_lock);
3257 struct gem_stats *hwstat = &bp->hw_stats.gem;
3259 spin_lock_irq(&bp->stats_lock);
3261 pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3262 pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3263 spin_unlock_irq(&bp->stats_lock);
3270 struct macb_stats *hwstat = &bp->hw_stats.macb;
3272 spin_lock_irq(&bp->stats_lock);
3274 mac_stats->FramesTransmittedOK = hwstat->tx_ok;
3275 mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
3276 mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
3277 mac_stats->FramesReceivedOK = hwstat->rx_ok;
3278 mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
3279 mac_stats->AlignmentErrors = hwstat->rx_align_errors;
3280 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
3281 mac_stats->LateCollisions = hwstat->tx_late_cols;
3282 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
3283 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
3284 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
3285 mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
3286 mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
3287 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
3288 spin_unlock_irq(&bp->stats_lock);
3295 struct gem_stats *hwstat = &bp->hw_stats.gem;
3297 spin_lock_irq(&bp->stats_lock);
3299 mac_stats->FramesTransmittedOK = hwstat->tx_frames;
3300 mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
3301 mac_stats->MultipleCollisionFrames =
3302 hwstat->tx_multiple_collision_frames;
3303 mac_stats->FramesReceivedOK = hwstat->rx_frames;
3304 mac_stats->FrameCheckSequenceErrors =
3305 hwstat->rx_frame_check_sequence_errors;
3306 mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
3307 mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
3308 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
3309 mac_stats->LateCollisions = hwstat->tx_late_collisions;
3310 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
3311 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
3312 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
3313 mac_stats->OctetsReceivedOK = hwstat->rx_octets;
3314 mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
3315 mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
3316 mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
3317 mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
3318 mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
3319 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
3320 spin_unlock_irq(&bp->stats_lock);
3328 struct macb_stats *hwstat = &bp->hw_stats.macb;
3330 spin_lock_irq(&bp->stats_lock);
3332 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3333 spin_unlock_irq(&bp->stats_lock);
3340 struct gem_stats *hwstat = &bp->hw_stats.gem;
3342 spin_lock_irq(&bp->stats_lock);
3344 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3345 spin_unlock_irq(&bp->stats_lock);
3353 struct macb_stats *hwstat = &bp->hw_stats.macb;
3355 spin_lock_irq(&bp->stats_lock);
3357 rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
3358 rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
3359 rmon_stats->jabbers = hwstat->rx_jabbers;
3360 spin_unlock_irq(&bp->stats_lock);
3379 struct gem_stats *hwstat = &bp->hw_stats.gem;
3381 spin_lock_irq(&bp->stats_lock);
3383 rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
3384 rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
3385 rmon_stats->jabbers = hwstat->rx_jabbers;
3386 rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
3387 rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
3388 rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
3389 rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
3390 rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
3391 rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
3392 rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
3393 rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
3394 rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
3395 rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
3396 rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
3397 rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
3398 rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
3399 rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
3400 spin_unlock_irq(&bp->stats_lock);
3416 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3419 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3420 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3433 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3434 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3436 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3446 phylink_ethtool_get_wol(bp->phylink, wol);
3447 wol->supported |= (WAKE_MAGIC | WAKE_ARP);
3450 wol->wolopts |= bp->wolopts;
3459 ret = phylink_ethtool_set_wol(bp->phylink, wol);
3461 if (ret && ret != -EOPNOTSUPP)
3464 bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
3465 bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
3466 bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
3468 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
3478 return phylink_ethtool_ksettings_get(bp->phylink, kset);
3486 return phylink_ethtool_ksettings_set(bp->phylink, kset);
3496 ring->rx_max_pending = MAX_RX_RING_SIZE;
3497 ring->tx_max_pending = MAX_TX_RING_SIZE;
3499 ring->rx_pending = bp->rx_ring_size;
3500 ring->tx_pending = bp->tx_ring_size;
3512 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
3513 return -EINVAL;
3515 new_rx_size = clamp_t(u32, ring->rx_pending,
3519 new_tx_size = clamp_t(u32, ring->tx_pending,
3523 if ((new_tx_size == bp->tx_ring_size) &&
3524 (new_rx_size == bp->rx_ring_size)) {
3529 if (netif_running(bp->dev)) {
3531 macb_close(bp->dev);
3534 bp->rx_ring_size = new_rx_size;
3535 bp->tx_ring_size = new_tx_size;
3538 macb_open(bp->dev);
3549 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
3553 else if (!IS_ERR(bp->pclk)) {
3554 tsu_clk = bp->pclk;
3557 return -ENOTSUPP;
3571 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
3576 info->so_timestamping =
3581 info->tx_types =
3585 info->rx_filters =
3589 if (bp->ptp_clock)
3590 info->phc_index = ptp_clock_index(bp->ptp_clock);
3611 if (bp->ptp_info)
3612 return bp->ptp_info->get_ts_info(netdev, info);
3619 struct net_device *netdev = bp->dev;
3624 if (!(netdev->features & NETIF_F_NTUPLE))
3629 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3630 struct ethtool_rx_flow_spec *fs = &item->fs;
3633 if (fs->location >= num_t2_scr)
3636 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3642 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3644 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3649 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3654 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3659 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3666 uint16_t index = fs->location;
3675 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3676 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3679 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3680 /* 1st compare reg - IP source address */
3683 w0 = tp4sp_v->ip4src;
3684 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3693 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3694 /* 2nd compare reg - IP destination address */
3697 w0 = tp4sp_v->ip4dst;
3698 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3707 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3708 /* 3rd compare reg - source port, destination port */
3712 if (tp4sp_m->psrc == tp4sp_m->pdst) {
3713 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3714 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3715 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3719 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
3721 if (tp4sp_m->psrc == 0xFFFF) { /* src port */
3722 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3725 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3735 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3750 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3753 int ret = -EINVAL;
3758 return -ENOMEM;
3759 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3763 fs->flow_type, (int)fs->ring_cookie, fs->location,
3764 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3765 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3766 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3767 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3769 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3772 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3773 if (item->fs.location > newfs->fs.location) {
3774 list_add_tail(&newfs->list, &item->list);
3777 } else if (item->fs.location == fs->location) {
3779 fs->location);
3780 ret = -EBUSY;
3785 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3788 bp->rx_fs_list.count++;
3792 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3796 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3809 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3811 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3812 if (item->fs.location == cmd->fs.location) {
3814 fs = &(item->fs);
3817 fs->flow_type, (int)fs->ring_cookie, fs->location,
3818 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3819 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3820 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3821 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3823 gem_writel_n(bp, SCRT2, fs->location, 0);
3825 list_del(&item->list);
3826 bp->rx_fs_list.count--;
3827 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3833 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3834 return -EINVAL;
3843 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3844 if (item->fs.location == cmd->fs.location) {
3845 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3849 return -EINVAL;
3859 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3860 if (cnt == cmd->rule_cnt)
3861 return -EMSGSIZE;
3862 rule_locs[cnt] = item->fs.location;
3865 cmd->data = bp->max_tuples;
3866 cmd->rule_cnt = cnt;
3877 switch (cmd->cmd) {
3879 cmd->data = bp->num_queues;
3882 cmd->rule_cnt = bp->rx_fs_list.count;
3892 "Command parameter %d is not supported\n", cmd->cmd);
3893 ret = -EOPNOTSUPP;
3904 switch (cmd->cmd) {
3906 if ((cmd->fs.location >= bp->max_tuples)
3907 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3908 ret = -EINVAL;
3918 "Command parameter %d is not supported\n", cmd->cmd);
3919 ret = -EOPNOTSUPP;
3969 return -EINVAL;
3971 return phylink_mii_ioctl(bp->phylink, rq, cmd);
3980 return -EINVAL;
3982 if (!bp->ptp_info)
3983 return -EOPNOTSUPP;
3985 return bp->ptp_info->get_hwtst(dev, cfg);
3995 return -EINVAL;
3997 if (!bp->ptp_info)
3998 return -EOPNOTSUPP;
4000 return bp->ptp_info->set_hwtst(dev, cfg, extack);
4023 struct net_device *netdev = bp->dev;
4030 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
4051 netdev_features_t changed = features ^ netdev->features;
4070 struct net_device *netdev = bp->dev;
4071 netdev_features_t features = netdev->features;
4081 list_for_each_entry(item, &bp->rx_fs_list.list, list)
4082 gem_prog_cmp_regs(bp, &item->fs);
4112 struct device_node *np = bp->pdev->dev.of_node;
4116 refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
4119 bp->caps = dt_conf->caps;
4121 if (hw_is_gem(bp->regs, bp->native_io)) {
4122 bp->caps |= MACB_CAPS_MACB_IS_GEM;
4126 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
4128 bp->caps |= MACB_CAPS_PCS;
4131 bp->caps |= MACB_CAPS_HIGH_SPEED;
4134 bp->caps |= MACB_CAPS_FIFO_MODE;
4137 dev_err(&bp->pdev->dev,
4141 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
4142 bp->ptp_info = &gem_ptp_info;
4149 bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
4151 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
4197 pdata = dev_get_platdata(&pdev->dev);
4199 *pclk = pdata->pclk;
4200 *hclk = pdata->hclk;
4202 *pclk = devm_clk_get(&pdev->dev, "pclk");
4203 *hclk = devm_clk_get(&pdev->dev, "hclk");
4207 return dev_err_probe(&pdev->dev,
4208 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
4212 return dev_err_probe(&pdev->dev,
4213 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
4216 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
4220 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
4224 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
4230 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4236 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
4242 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4248 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
4254 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
4284 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
4285 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
4292 if (!(bp->queue_mask & (1 << hw_q)))
4295 queue = &bp->queues[q];
4296 queue->bp = bp;
4297 spin_lock_init(&queue->tx_ptr_lock);
4298 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
4299 netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
4301 queue->ISR = GEM_ISR(hw_q - 1);
4302 queue->IER = GEM_IER(hw_q - 1);
4303 queue->IDR = GEM_IDR(hw_q - 1);
4304 queue->IMR = GEM_IMR(hw_q - 1);
4305 queue->TBQP = GEM_TBQP(hw_q - 1);
4306 queue->RBQP = GEM_RBQP(hw_q - 1);
4307 queue->RBQS = GEM_RBQS(hw_q - 1);
4309 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4310 queue->TBQPH = GEM_TBQPH(hw_q - 1);
4311 queue->RBQPH = GEM_RBQPH(hw_q - 1);
4316 queue->ISR = MACB_ISR;
4317 queue->IER = MACB_IER;
4318 queue->IDR = MACB_IDR;
4319 queue->IMR = MACB_IMR;
4320 queue->TBQP = MACB_TBQP;
4321 queue->RBQP = MACB_RBQP;
4323 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4324 queue->TBQPH = MACB_TBQPH;
4325 queue->RBQPH = MACB_RBQPH;
4335 queue->irq = platform_get_irq(pdev, q);
4336 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4337 IRQF_SHARED, dev->name, queue);
4339 dev_err(&pdev->dev,
4341 queue->irq, err);
4345 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
4349 dev->netdev_ops = &macb_netdev_ops;
4353 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4354 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4355 bp->macbgem_ops.mog_init_rings = gem_init_rings;
4356 bp->macbgem_ops.mog_rx = gem_rx;
4357 dev->ethtool_ops = &gem_ethtool_ops;
4359 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4360 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4361 bp->macbgem_ops.mog_init_rings = macb_init_rings;
4362 bp->macbgem_ops.mog_rx = macb_rx;
4363 dev->ethtool_ops = &macb_ethtool_ops;
4368 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4371 dev->hw_features = NETIF_F_SG;
4375 dev->hw_features |= MACB_NETIF_LSO;
4378 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4379 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
4380 if (bp->caps & MACB_CAPS_SG_DISABLED)
4381 dev->hw_features &= ~NETIF_F_SG;
4382 dev->features = dev->hw_features;
4386 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
4389 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
4391 INIT_LIST_HEAD(&bp->rx_fs_list.list);
4392 if (bp->max_tuples > 0) {
4400 dev->hw_features |= NETIF_F_NTUPLE;
4402 bp->rx_fs_list.count = 0;
4403 spin_lock_init(&bp->rx_fs_lock);
4405 bp->max_tuples = 0;
4408 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4410 if (phy_interface_mode_is_rgmii(bp->phy_interface))
4411 val = bp->usrio->rgmii;
4412 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4413 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4414 val = bp->usrio->rmii;
4415 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4416 val = bp->usrio->mii;
4418 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4419 val |= bp->usrio->refclk;
4427 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4451 struct macb_queue *q = &lp->queues[0];
4453 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4456 &q->rx_ring_dma, GFP_KERNEL);
4457 if (!q->rx_ring)
4458 return -ENOMEM;
4460 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4463 &q->rx_buffers_dma, GFP_KERNEL);
4464 if (!q->rx_buffers) {
4465 dma_free_coherent(&lp->pdev->dev,
4468 q->rx_ring, q->rx_ring_dma);
4469 q->rx_ring = NULL;
4470 return -ENOMEM;
4478 struct macb_queue *q = &lp->queues[0];
4480 if (q->rx_ring) {
4481 dma_free_coherent(&lp->pdev->dev,
4484 q->rx_ring, q->rx_ring_dma);
4485 q->rx_ring = NULL;
4488 if (q->rx_buffers) {
4489 dma_free_coherent(&lp->pdev->dev,
4492 q->rx_buffers, q->rx_buffers_dma);
4493 q->rx_buffers = NULL;
4500 struct macb_queue *q = &lp->queues[0];
4510 addr = q->rx_buffers_dma;
4514 desc->ctrl = 0;
4519 desc->addr |= MACB_BIT(RX_WRAP);
4522 q->rx_tail = 0;
4525 macb_writel(lp, RBQP, q->rx_ring_dma);
4571 ret = pm_runtime_resume_and_get(&lp->pdev->dev);
4596 pm_runtime_put_sync(&lp->pdev->dev);
4607 phylink_stop(lp->phylink);
4608 phylink_disconnect_phy(lp->phylink);
4612 return pm_runtime_put(&lp->pdev->dev);
4627 lp->rm9200_txq[desc].skb = skb;
4628 lp->rm9200_txq[desc].size = skb->len;
4629 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
4630 skb->len, DMA_TO_DEVICE);
4631 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
4633 dev->stats.tx_dropped++;
4639 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
4641 macb_writel(lp, TCR, skb->len);
4657 struct macb_queue *q = &lp->queues[0];
4663 desc = macb_rx_desc(q, q->rx_tail);
4664 while (desc->addr & MACB_BIT(RX_USED)) {
4665 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
4666 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
4672 skb->protocol = eth_type_trans(skb, dev);
4673 dev->stats.rx_packets++;
4674 dev->stats.rx_bytes += pktlen;
4677 dev->stats.rx_dropped++;
4680 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4681 dev->stats.multicast++;
4684 desc->addr &= ~MACB_BIT(RX_USED);
4687 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4688 q->rx_tail = 0;
4690 q->rx_tail++;
4692 desc = macb_rx_desc(q, q->rx_tail);
4717 dev->stats.tx_errors++;
4720 if (lp->rm9200_txq[desc].skb) {
4721 dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
4722 lp->rm9200_txq[desc].skb = NULL;
4723 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
4724 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
4725 dev->stats.tx_packets++;
4726 dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
4731 /* Work-around for EMAC Errata section 41.3.1 */
4751 at91ether_interrupt(dev->irq, dev);
4783 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
4789 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4802 bp->queues[0].bp = bp;
4804 dev->netdev_ops = &at91ether_netdev_ops;
4805 dev->ethtool_ops = &macb_ethtool_ops;
4807 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4808 0, dev->name, dev);
4822 return mgmt->rate;
4855 iowrite32(1, mgmt->reg);
4857 iowrite32(0, mgmt->reg);
4858 mgmt->rate = rate;
4880 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4882 err = -ENOMEM;
4886 init.name = "sifive-gemgxl-mgmt";
4891 mgmt->rate = 0;
4892 mgmt->hw.init = &init;
4894 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4902 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
4906 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4919 mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4920 if (IS_ERR(mgmt->reg))
4921 return PTR_ERR(mgmt->reg);
4932 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4934 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
4936 if (IS_ERR(bp->sgmii_phy))
4937 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
4940 ret = phy_init(bp->sgmii_phy);
4942 return dev_err_probe(&pdev->dev, ret,
4949 ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
4952 dev_err(&pdev->dev, "Failed to read power management information\n");
4967 ret = device_reset_optional(&pdev->dev);
4969 phy_exit(bp->sgmii_phy);
4970 return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
4977 phy_exit(bp->sgmii_phy);
5135 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
5137 { .compatible = "cdns,np4-macb", .data = &np4_config },
5138 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
5140 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
5141 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
5142 { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
5143 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
5144 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
5145 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
5146 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
5148 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
5149 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
5150 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
5151 { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
5152 { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
5153 { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
5154 { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
5155 { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
5156 { .compatible = "xlnx,versal-gem", .data = &versal_config},
5178 struct clk **) = macb_config->clk_init;
5179 int (*init)(struct platform_device *) = macb_config->init;
5180 struct device_node *np = pdev->dev.of_node;
5201 if (match && match->data) {
5202 macb_config = match->data;
5203 clk_init = macb_config->clk_init;
5204 init = macb_config->init;
5212 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
5213 pm_runtime_use_autosuspend(&pdev->dev);
5214 pm_runtime_get_noresume(&pdev->dev);
5215 pm_runtime_set_active(&pdev->dev);
5216 pm_runtime_enable(&pdev->dev);
5222 err = -ENOMEM;
5226 dev->base_addr = regs->start;
5228 SET_NETDEV_DEV(dev, &pdev->dev);
5231 bp->pdev = pdev;
5232 bp->dev = dev;
5233 bp->regs = mem;
5234 bp->native_io = native_io;
5236 bp->macb_reg_readl = hw_readl_native;
5237 bp->macb_reg_writel = hw_writel_native;
5239 bp->macb_reg_readl = hw_readl;
5240 bp->macb_reg_writel = hw_writel;
5242 bp->num_queues = num_queues;
5243 bp->queue_mask = queue_mask;
5245 bp->dma_burst_length = macb_config->dma_burst_length;
5246 bp->pclk = pclk;
5247 bp->hclk = hclk;
5248 bp->tx_clk = tx_clk;
5249 bp->rx_clk = rx_clk;
5250 bp->tsu_clk = tsu_clk;
5252 bp->jumbo_max_len = macb_config->jumbo_max_len;
5254 if (!hw_is_gem(bp->regs, bp->native_io))
5255 bp->max_tx_length = MACB_MAX_TX_LEN;
5256 else if (macb_config->max_tx_length)
5257 bp->max_tx_length = macb_config->max_tx_length;
5259 bp->max_tx_length = GEM_MAX_TX_LEN;
5261 bp->wol = 0;
5262 device_set_wakeup_capable(&pdev->dev, 1);
5264 bp->usrio = macb_config->usrio;
5270 err = of_property_read_u32(bp->pdev->dev.of_node,
5271 "cdns,rx-watermark",
5272 &bp->rx_watermark);
5278 wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
5279 if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
5280 dev_info(&bp->pdev->dev, "Invalid watermark value\n");
5281 bp->rx_watermark = 0;
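
/*
 * Illustrative sketch of the watermark validation above: the highest usable
 * partial store-and-forward watermark is bounded by the RX packet buffer
 * address width read from DCFG2, i.e. (1 << addr_bits) - 1; zero or anything
 * above that limit disables the feature.  Standalone demo with an assumed
 * 11-bit address field.
 */
#include <stdio.h>

static unsigned int demo_max_rx_watermark(unsigned int pbuf_addr_bits)
{
	return (1u << pbuf_addr_bits) - 1;
}

int main(void)
{
	unsigned int limit = demo_max_rx_watermark(11);	/* assumed field width */
	unsigned int requested = 0x80;			/* e.g. from cdns,rx-watermark */

	if (!requested || requested > limit)
		printf("invalid watermark %u (limit %u), feature disabled\n",
		       requested, limit);
	else
		printf("using RX watermark %u of %u\n", requested, limit);
	return 0;
}
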
5285 spin_lock_init(&bp->lock);
5286 spin_lock_init(&bp->stats_lock);
5293 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
5295 dev_err(&pdev->dev, "failed to set DMA mask\n");
5298 bp->hw_dma_cap |= HW_DMA_CAP_64B;
5303 dev->irq = platform_get_irq(pdev, 0);
5304 if (dev->irq < 0) {
5305 err = dev->irq;
5309 /* MTU range: 68 - 1518 or 10240 */
5310 dev->min_mtu = GEM_MTU_MIN_SIZE;
5311 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
5312 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
5314 dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
5316 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
5319 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
5324 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
5328 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
5329 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
5330 bp->rx_intr_mask |= MACB_BIT(RXUBR);
5332 err = of_get_ethdev_address(np, bp->dev);
5333 if (err == -EPROBE_DEFER)
5341 bp->phy_interface = PHY_INTERFACE_MODE_MII;
5343 bp->phy_interface = interface;
5358 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
5362 INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
5366 dev->base_addr, dev->irq, dev->dev_addr);
5368 pm_runtime_mark_last_busy(&bp->pdev->dev);
5369 pm_runtime_put_autosuspend(&bp->pdev->dev);
5374 mdiobus_unregister(bp->mii_bus);
5375 mdiobus_free(bp->mii_bus);
5378 phy_exit(bp->sgmii_phy);
5385 pm_runtime_disable(&pdev->dev);
5386 pm_runtime_set_suspended(&pdev->dev);
5387 pm_runtime_dont_use_autosuspend(&pdev->dev);
5401 phy_exit(bp->sgmii_phy);
5402 mdiobus_unregister(bp->mii_bus);
5403 mdiobus_free(bp->mii_bus);
5406 cancel_work_sync(&bp->hresp_err_bh_work);
5407 pm_runtime_disable(&pdev->dev);
5408 pm_runtime_dont_use_autosuspend(&pdev->dev);
5409 if (!pm_runtime_suspended(&pdev->dev)) {
5410 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
5411 bp->rx_clk, bp->tsu_clk);
5412 pm_runtime_set_suspended(&pdev->dev);
5414 phylink_destroy(bp->phylink);
5431 if (!device_may_wakeup(&bp->dev->dev))
5432 phy_exit(bp->sgmii_phy);
5437 if (bp->wol & MACB_WOL_ENABLED) {
5439 idev = __in_dev_get_rcu(bp->dev);
5441 ifa = rcu_dereference(idev->ifa_list);
5442 if ((bp->wolopts & WAKE_ARP) && !ifa) {
5444 return -EOPNOTSUPP;
5446 spin_lock_irqsave(&bp->lock, flags);
5453 for (q = 0, queue = bp->queues; q < bp->num_queues;
5456 if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
5461 lower_32_bits(bp->rx_ring_tieoff_dma));
5464 upper_32_bits(bp->rx_ring_tieoff_dma));
5468 queue_writel(queue, IDR, -1);
5470 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5471 queue_writel(queue, ISR, -1);
5476 macb_writel(bp, TSR, -1);
5477 macb_writel(bp, RSR, -1);
5479 tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
5480 if (bp->wolopts & WAKE_ARP) {
5483 tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
5489 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5491 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
5492 IRQF_SHARED, netdev->name, bp->queues);
5496 bp->queues[0].irq, err);
5497 spin_unlock_irqrestore(&bp->lock, flags);
5500 queue_writel(bp->queues, IER, GEM_BIT(WOL));
5503 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
5504 IRQF_SHARED, netdev->name, bp->queues);
5508 bp->queues[0].irq, err);
5509 spin_unlock_irqrestore(&bp->lock, flags);
5512 queue_writel(bp->queues, IER, MACB_BIT(WOL));
5515 spin_unlock_irqrestore(&bp->lock, flags);
5517 enable_irq_wake(bp->queues[0].irq);
5521 for (q = 0, queue = bp->queues; q < bp->num_queues;
5523 napi_disable(&queue->napi_rx);
5524 napi_disable(&queue->napi_tx);
5527 if (!(bp->wol & MACB_WOL_ENABLED)) {
5529 phylink_stop(bp->phylink);
5531 spin_lock_irqsave(&bp->lock, flags);
5533 spin_unlock_irqrestore(&bp->lock, flags);
5536 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5537 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
5539 if (netdev->hw_features & NETIF_F_NTUPLE)
5540 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
5542 if (bp->ptp_info)
5543 bp->ptp_info->ptp_remove(netdev);
5559 if (!device_may_wakeup(&bp->dev->dev))
5560 phy_init(bp->sgmii_phy);
5568 if (bp->wol & MACB_WOL_ENABLED) {
5569 spin_lock_irqsave(&bp->lock, flags);
5572 queue_writel(bp->queues, IDR, GEM_BIT(WOL));
5575 queue_writel(bp->queues, IDR, MACB_BIT(WOL));
5579 queue_readl(bp->queues, ISR);
5580 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5581 queue_writel(bp->queues, ISR, -1);
5583 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5584 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
5585 IRQF_SHARED, netdev->name, bp->queues);
5589 bp->queues[0].irq, err);
5590 spin_unlock_irqrestore(&bp->lock, flags);
5593 spin_unlock_irqrestore(&bp->lock, flags);
5595 disable_irq_wake(bp->queues[0].irq);
5601 phylink_stop(bp->phylink);
5605 for (q = 0, queue = bp->queues; q < bp->num_queues;
5607 napi_enable(&queue->napi_rx);
5608 napi_enable(&queue->napi_tx);
5611 if (netdev->hw_features & NETIF_F_NTUPLE)
5612 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
5614 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5615 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
5623 phylink_start(bp->phylink);
5627 if (bp->ptp_info)
5628 bp->ptp_info->ptp_init(netdev);
5639 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
5640 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
5641 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
5652 clk_prepare_enable(bp->pclk);
5653 clk_prepare_enable(bp->hclk);
5654 clk_prepare_enable(bp->tx_clk);
5655 clk_prepare_enable(bp->rx_clk);
5656 clk_prepare_enable(bp->tsu_clk);
5657 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
5658 clk_prepare_enable(bp->tsu_clk);