Lines matching +full:zynqmp +full:-reset in drivers/net/ethernet/cadence/macb_main.c (Linux kernel Cadence MACB/GEM Ethernet driver)
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2004-2006 Atmel Corporation
10 #include <linux/clk-provider.h>
23 #include <linux/dma-mapping.h>
36 #include <linux/reset.h>
37 #include <linux/firmware/xlnx-zynqmp.h>
61 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
72 …MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN -…
88 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
127 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
152 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
178 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
184 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
185 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
186 return &queue->tx_ring[index]; in macb_tx_desc()
192 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
199 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
200 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
202 return queue->tx_ring_dma + offset; in macb_tx_dma()
207 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
212 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
213 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
214 return &queue->rx_ring[index]; in macb_rx_desc()
219 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
220 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
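The wrap helpers above map a free-running index to a ring slot with index & (size - 1), which only works for power-of-two ring sizes. A standalone sketch of the same arithmetic (the 512-entry size is illustrative, not from the driver):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 512u                     /* must be a power of two */

static unsigned int ring_wrap(unsigned int index)
{
        return index & (RING_SIZE - 1);    /* same masking as macb_*_ring_wrap() */
}

int main(void)
{
        assert((RING_SIZE & (RING_SIZE - 1)) == 0); /* power-of-two check */
        /* a free-running index wraps cleanly through the mask */
        printf("%u -> slot %u\n", 511u, ring_wrap(511u)); /* slot 511 */
        printf("%u -> slot %u\n", 512u, ring_wrap(512u)); /* slot 0   */
        printf("%u -> slot %u\n", 513u, ring_wrap(513u)); /* slot 1   */
        return 0;
}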
226 return __raw_readl(bp->regs + offset); in hw_readl_native()
231 __raw_writel(value, bp->regs + offset); in hw_writel_native()
236 return readl_relaxed(bp->regs + offset); in hw_readl()
241 writel_relaxed(value, bp->regs + offset); in hw_writel()
278 bottom = get_unaligned_le32(bp->dev->dev_addr); in macb_set_hwaddr()
280 top = get_unaligned_le16(bp->dev->dev_addr + 4); in macb_set_hwaddr()
317 eth_hw_addr_set(bp->dev, addr); in macb_get_hwaddr()
322 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
323 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
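macb_set_hwaddr()/macb_get_hwaddr() exchange the 6-byte station address with the hardware as a little-endian 32-bit bottom half plus a 16-bit top half. A standalone sketch of that packing, using plain shifts instead of the kernel's get_unaligned helpers:

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into the BOTTOM/TOP register layout: first four
 * bytes little-endian in BOTTOM, last two little-endian in TOP. */
static void mac_to_regs(const uint8_t mac[6], uint32_t *bottom, uint16_t *top)
{
        *bottom = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
                  (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
        *top = (uint16_t)(mac[4] | mac[5] << 8);
}

int main(void)
{
        const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
        uint32_t bottom;
        uint16_t top;

        mac_to_regs(mac, &bottom, &top);
        printf("BOTTOM=0x%08x TOP=0x%04x\n", bottom, top);
        return 0;
}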
336 struct macb *bp = bus->priv; in macb_mdio_read_c22()
339 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_read_c22()
360 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read_c22()
361 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read_c22()
369 struct macb *bp = bus->priv; in macb_mdio_read_c45()
372 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read_c45()
374 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read_c45()
406 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read_c45()
407 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read_c45()
415 struct macb *bp = bus->priv; in macb_mdio_write_c22()
418 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_write_c22()
438 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write_c22()
439 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write_c22()
448 struct macb *bp = bus->priv; in macb_mdio_write_c45()
451 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write_c45()
453 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write_c45()
484 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write_c45()
485 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write_c45()
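All four MDIO accessors wrap the bus transaction in the same runtime-PM bracket: take a reference (resuming the controller if needed), do the transfer, then mark the device busy and drop the reference via autosuspend. A condensed kernel-context sketch of the pattern, with the actual register access elided:

#include <linux/pm_runtime.h>

/* Sketch of the runtime-PM bracket around an MDIO transfer; assumes a
 * struct device with runtime PM and autosuspend already configured. */
static int sketch_mdio_xfer(struct device *dev)
{
        int status;

        status = pm_runtime_resume_and_get(dev); /* wake the MAC, take a ref */
        if (status < 0)
                return status;                   /* device stayed suspended  */

        /* ... issue the C22/C45 frame and wait for the bus to go idle ... */

        pm_runtime_mark_last_busy(dev);          /* restart autosuspend timer */
        pm_runtime_put_autosuspend(dev);         /* drop ref; suspend later   */
        return 0;
}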
497 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init_buffers()
499 upper_32_bits(bp->queues[0].rx_ring_dma)); in macb_init_buffers()
501 upper_32_bits(bp->queues[0].tx_ring_dma)); in macb_init_buffers()
505 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
506 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
507 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
512 * macb_set_tx_clk() - Set a clock to a new frequency
520 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
524 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
531 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
538 ferr = abs(rate_rounded - rate); in macb_set_tx_clk()
541 netdev_warn(bp->dev, in macb_set_tx_clk()
545 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
546 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
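clk_round_rate() may return something close to, but not exactly, the requested TX rate; the driver only warns when the error exceeds a few parts per hundred thousand. A standalone sketch of that tolerance check, assuming a 5-per-100000 bound (the exact threshold here is an assumption):

#include <stdio.h>
#include <stdlib.h>

/* Error between requested and rounded rate, in units of rate/100000,
 * must stay within a small bound before the rate is considered usable. */
static int rate_acceptable(long rate, long rate_rounded)
{
        long unit = rate / 100000;
        long ferr = labs(rate_rounded - rate);

        ferr = (ferr + unit - 1) / unit;      /* DIV_ROUND_UP */
        return ferr <= 5;                     /* assumed threshold */
}

int main(void)
{
        printf("125 MHz vs 124.9999 MHz: %s\n",
               rate_acceptable(125000000, 124999900) ? "ok" : "warn");
        printf("125 MHz vs 124 MHz:      %s\n",
               rate_acceptable(125000000, 124000000) ? "ok" : "warn");
        return 0;
}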
571 state->speed = SPEED_10000; in macb_usx_pcs_get_state()
572 state->duplex = 1; in macb_usx_pcs_get_state()
573 state->an_complete = 1; in macb_usx_pcs_get_state()
576 state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK)); in macb_usx_pcs_get_state()
579 state->pause = MLO_PAUSE_RX; in macb_usx_pcs_get_state()
599 state->link = 0; in macb_pcs_get_state()
631 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_config()
637 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
642 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
643 if (state->interface == PHY_INTERFACE_MODE_RMII) in macb_mac_config()
649 if (state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
651 } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { in macb_mac_config()
654 } else if (bp->caps & MACB_CAPS_MIIONRGMII && in macb_mac_config()
655 bp->phy_interface == PHY_INTERFACE_MODE_MII) { in macb_mac_config()
671 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
683 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
689 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_down()
695 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
696 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
698 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
713 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_up()
720 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
732 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
747 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
750 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
752 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
757 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
761 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
763 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_up()
779 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_select_pcs()
783 return &bp->phylink_usx_pcs; in macb_mac_select_pcs()
785 return &bp->phylink_sgmii_pcs; in macb_mac_select_pcs()
799 dn = of_parse_phandle(dn, "phy-handle", 0); in macb_phy_handle_exists()
806 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
807 struct net_device *dev = bp->dev; in macb_phylink_connect()
812 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
815 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
818 return -ENXIO; in macb_phylink_connect()
822 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
830 phylink_start(bp->phylink); in macb_phylink_connect()
838 struct net_device *ndev = to_net_dev(config->dev); in macb_get_pcs_fixed_state()
841 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
849 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops; in macb_mii_probe()
850 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mii_probe()
852 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
853 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
854 bp->phylink_config.mac_managed_pm = true; in macb_mii_probe()
856 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
857 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
858 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
861 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | in macb_mii_probe()
865 bp->phylink_config.supported_interfaces); in macb_mii_probe()
867 bp->phylink_config.supported_interfaces); in macb_mii_probe()
870 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) { in macb_mii_probe()
871 bp->phylink_config.mac_capabilities |= MAC_1000FD; in macb_mii_probe()
872 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_mii_probe()
873 bp->phylink_config.mac_capabilities |= MAC_1000HD; in macb_mii_probe()
876 bp->phylink_config.supported_interfaces); in macb_mii_probe()
877 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces); in macb_mii_probe()
879 if (bp->caps & MACB_CAPS_PCS) in macb_mii_probe()
881 bp->phylink_config.supported_interfaces); in macb_mii_probe()
883 if (bp->caps & MACB_CAPS_HIGH_SPEED) { in macb_mii_probe()
885 bp->phylink_config.supported_interfaces); in macb_mii_probe()
886 bp->phylink_config.mac_capabilities |= MAC_10000FD; in macb_mii_probe()
890 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
891 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
892 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
894 PTR_ERR(bp->phylink)); in macb_mii_probe()
895 return PTR_ERR(bp->phylink); in macb_mii_probe()
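macb_mii_probe() builds the phylink instance from a phylink_config describing MAC capabilities and supported interfaces. A condensed kernel-context sketch of the minimal bring-up, keeping only the MII/RMII subset of the driver's setup (names follow the fragments above):

#include <linux/phylink.h>
#include <linux/err.h>

/* Minimal phylink bring-up sketch: fill the config, declare what the
 * MAC can do, then create the instance. Not the driver's full logic. */
static int sketch_phylink_init(struct macb *bp, struct net_device *dev)
{
        bp->phylink_config.dev = &dev->dev;
        bp->phylink_config.type = PHYLINK_NETDEV;
        bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
                                              MAC_SYM_PAUSE |
                                              MAC_10 | MAC_100;

        __set_bit(PHY_INTERFACE_MODE_MII,
                  bp->phylink_config.supported_interfaces);
        __set_bit(PHY_INTERFACE_MODE_RMII,
                  bp->phylink_config.supported_interfaces);

        bp->phylink = phylink_create(&bp->phylink_config,
                                     bp->pdev->dev.fwnode,
                                     bp->phy_interface, &macb_phylink_ops);
        return PTR_ERR_OR_ZERO(bp->phylink);
}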
903 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
909 return of_mdiobus_register(bp->mii_bus, mdio_np); in macb_mdiobus_register()
923 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
926 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
931 struct device_node *mdio_np, *np = bp->pdev->dev.of_node; in macb_mii_init()
932 int err = -ENXIO; in macb_mii_init()
934 /* With fixed-link, we don't need to register the MDIO bus, in macb_mii_init()
940 return macb_mii_probe(bp->dev); in macb_mii_init()
945 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
946 if (!bp->mii_bus) { in macb_mii_init()
947 err = -ENOMEM; in macb_mii_init()
951 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
952 bp->mii_bus->read = &macb_mdio_read_c22; in macb_mii_init()
953 bp->mii_bus->write = &macb_mdio_write_c22; in macb_mii_init()
954 bp->mii_bus->read_c45 = &macb_mdio_read_c45; in macb_mii_init()
955 bp->mii_bus->write_c45 = &macb_mdio_write_c45; in macb_mii_init()
956 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
957 bp->pdev->name, bp->pdev->id); in macb_mii_init()
958 bp->mii_bus->priv = bp; in macb_mii_init()
959 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
961 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
967 err = macb_mii_probe(bp->dev); in macb_mii_init()
974 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
976 mdiobus_free(bp->mii_bus); in macb_mii_init()
985 u64 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
986 u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
989 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); in macb_update_stats()
992 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
1010 if (tx_skb->mapping) { in macb_tx_unmap()
1011 if (tx_skb->mapped_as_page) in macb_tx_unmap()
1012 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1013 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1015 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1016 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1017 tx_skb->mapping = 0; in macb_tx_unmap()
1020 if (tx_skb->skb) { in macb_tx_unmap()
1021 napi_consume_skb(tx_skb->skb, budget); in macb_tx_unmap()
1022 tx_skb->skb = NULL; in macb_tx_unmap()
1031 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
1033 desc_64->addrh = upper_32_bits(addr); in macb_set_addr()
1041 desc->addr = lower_32_bits(addr); in macb_set_addr()
1050 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1052 addr = ((u64)(desc_64->addrh) << 32); in macb_get_addr()
1055 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); in macb_get_addr()
1057 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_get_addr()
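With HW_DMA_CAP_64B, a DMA address is split across desc->addr (low word) and the addrh extension word, then recombined on read. A standalone sketch of the split and recombination:

#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor: low address word plus 64-bit extension. */
struct fake_desc { uint32_t addr; uint32_t addrh; };

static void set_addr(struct fake_desc *d, uint64_t a)
{
        d->addrh = (uint32_t)(a >> 32);    /* upper_32_bits() */
        d->addr  = (uint32_t)a;            /* lower_32_bits() */
}

static uint64_t get_addr(const struct fake_desc *d)
{
        return (uint64_t)d->addrh << 32 | d->addr;
}

int main(void)
{
        struct fake_desc d;

        set_addr(&d, 0x00000001f0000040ULL);
        printf("addrh=0x%08x addr=0x%08x back=0x%016llx\n",
               d.addrh, d.addr, (unsigned long long)get_addr(&d));
        return 0;
}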
1068 struct macb *bp = queue->bp; in macb_tx_error_task()
1078 queue_index = queue - bp->queues; in macb_tx_error_task()
1079 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1080 queue_index, queue->tx_tail, queue->tx_head); in macb_tx_error_task()
1088 napi_disable(&queue->napi_tx); in macb_tx_error_task()
1089 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1092 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1099 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1107 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { in macb_tx_error_task()
1111 ctrl = desc->ctrl; in macb_tx_error_task()
1113 skb = tx_skb->skb; in macb_tx_error_task()
1121 skb = tx_skb->skb; in macb_tx_error_task()
1128 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1130 skb->data); in macb_tx_error_task()
1131 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1132 queue->stats.tx_packets++; in macb_tx_error_task()
1134 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1135 queue->stats.tx_bytes += skb->len; in macb_tx_error_task()
1136 bytes += skb->len; in macb_tx_error_task()
1139 /* "Buffers exhausted mid-frame" errors may only happen in macb_tx_error_task()
1144 netdev_err(bp->dev, in macb_tx_error_task()
1145 "BUG: TX buffers exhausted mid-frame\n"); in macb_tx_error_task()
1147 desc->ctrl = ctrl | MACB_BIT(TX_USED); in macb_tx_error_task()
1153 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index), in macb_tx_error_task()
1159 desc->ctrl = MACB_BIT(TX_USED); in macb_tx_error_task()
1165 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
1167 queue->tx_head = 0; in macb_tx_error_task()
1168 queue->tx_tail = 0; in macb_tx_error_task()
1178 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1181 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1182 napi_enable(&queue->napi_tx); in macb_tx_error_task()
1192 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ptp_one_step_sync()
1204 if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP) in ptp_one_step_sync()
1217 struct macb *bp = queue->bp; in macb_tx_complete()
1218 u16 queue_index = queue - bp->queues; in macb_tx_complete()
1225 spin_lock_irqsave(&queue->tx_ptr_lock, flags); in macb_tx_complete()
1226 head = queue->tx_head; in macb_tx_complete()
1227 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) { in macb_tx_complete()
1238 ctrl = desc->ctrl; in macb_tx_complete()
1249 skb = tx_skb->skb; in macb_tx_complete()
1253 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in macb_tx_complete()
1257 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_complete()
1259 skb->data); in macb_tx_complete()
1260 bp->dev->stats.tx_packets++; in macb_tx_complete()
1261 queue->stats.tx_packets++; in macb_tx_complete()
1262 bp->dev->stats.tx_bytes += skb->len; in macb_tx_complete()
1263 queue->stats.tx_bytes += skb->len; in macb_tx_complete()
1265 bytes += skb->len; in macb_tx_complete()
1280 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index), in macb_tx_complete()
1283 queue->tx_tail = tail; in macb_tx_complete()
1284 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_complete()
1285 CIRC_CNT(queue->tx_head, queue->tx_tail, in macb_tx_complete()
1286 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_complete()
1287 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_complete()
1288 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); in macb_tx_complete()
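macb_tx_complete() wakes a stopped subqueue once CIRC_CNT() shows the in-flight descriptor count has dropped to MACB_TX_WAKEUP_THRESH, i.e. 3/4 of the ring (see the macro near the top of the file). A standalone sketch of the circ_buf arithmetic:

#include <stdio.h>

/* Same macros as <linux/circ_buf.h>; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
        unsigned int size = 512, head = 500, tail = 120;

        /* In-flight descriptors, and room left for new ones: */
        printf("cnt=%u space=%u\n",
               CIRC_CNT(head, tail, size), CIRC_SPACE(head, tail, size));
        /* The driver's wake threshold is 3/4 of the ring: */
        printf("wake when cnt <= %u\n", 3 * size / 4);
        return 0;
}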
1298 struct macb *bp = queue->bp; in gem_rx_refill()
1301 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, in gem_rx_refill()
1302 bp->rx_ring_size) > 0) { in gem_rx_refill()
1303 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1310 if (!queue->rx_skbuff[entry]) { in gem_rx_refill()
1312 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1314 netdev_err(bp->dev, in gem_rx_refill()
1320 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1321 bp->rx_buffer_size, in gem_rx_refill()
1323 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1328 queue->rx_skbuff[entry] = skb; in gem_rx_refill()
1330 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1332 desc->ctrl = 0; in gem_rx_refill()
1342 desc->ctrl = 0; in gem_rx_refill()
1344 desc->addr &= ~MACB_BIT(RX_USED); in gem_rx_refill()
1346 queue->rx_prepared_head++; in gem_rx_refill()
1352 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1353 queue, queue->rx_prepared_head, queue->rx_tail); in gem_rx_refill()
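gem_rx_refill() hands a freshly mapped buffer to the MAC by clearing the RX_USED bit in desc->addr, making sure ctrl is written first. A condensed kernel-context sketch of that ownership handshake, assuming the plain 32-bit descriptor layout (the 64-bit and PTP variants add extra words):

/* Give one buffer to the MAC: RX_USED (bit 0 of addr) clear means
 * hardware owns the slot; the MAC sets it back when a frame lands. */
static void sketch_give_to_hw(struct macb_dma_desc *desc, dma_addr_t paddr,
                              bool last_in_ring)
{
        if (last_in_ring)
                paddr |= MACB_BIT(RX_WRAP); /* wrap marker lives in addr too */

        desc->ctrl = 0;
        /* ctrl must be visible before the addr write transfers ownership */
        dma_wmb();
        desc->addr = lower_32_bits(paddr);  /* RX_USED clear: MAC may fill it */
}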
1365 desc->addr &= ~MACB_BIT(RX_USED); in discard_partial_frame()
1380 struct macb *bp = queue->bp; in gem_rx()
1392 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1398 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; in gem_rx()
1404 /* Ensure ctrl is at least as up-to-date as rxused */ in gem_rx()
1407 ctrl = desc->ctrl; in gem_rx()
1409 queue->rx_tail++; in gem_rx()
1413 netdev_err(bp->dev, in gem_rx()
1415 bp->dev->stats.rx_dropped++; in gem_rx()
1416 queue->stats.rx_dropped++; in gem_rx()
1419 skb = queue->rx_skbuff[entry]; in gem_rx()
1421 netdev_err(bp->dev, in gem_rx()
1423 bp->dev->stats.rx_dropped++; in gem_rx()
1424 queue->stats.rx_dropped++; in gem_rx()
1428 queue->rx_skbuff[entry] = NULL; in gem_rx()
1429 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1431 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1434 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1435 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1437 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1439 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1440 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1442 skb->ip_summed = CHECKSUM_UNNECESSARY; in gem_rx()
1444 bp->dev->stats.rx_packets++; in gem_rx()
1445 queue->stats.rx_packets++; in gem_rx()
1446 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1447 queue->stats.rx_bytes += skb->len; in gem_rx()
1452 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1453 skb->len, skb->csum); in gem_rx()
1457 skb->data, 32, true); in gem_rx()
1476 struct macb *bp = queue->bp; in macb_rx_frame()
1479 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1481 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1487 * payload word-aligned. in macb_rx_frame()
1493 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1495 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1498 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1515 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1520 return -1; in macb_rx_frame()
1522 frag_len = len - offset; in macb_rx_frame()
1527 offset += bp->rx_buffer_size; in macb_rx_frame()
1529 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1539 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1541 bp->dev->stats.rx_packets++; in macb_rx_frame()
1542 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1543 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1544 skb->len, skb->csum); in macb_rx_frame()
1552 struct macb *bp = queue->bp; in macb_init_rx_ring()
1557 addr = queue->rx_buffers_dma; in macb_init_rx_ring()
1558 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1561 desc->ctrl = 0; in macb_init_rx_ring()
1562 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1564 desc->addr |= MACB_BIT(RX_WRAP); in macb_init_rx_ring()
1565 queue->rx_tail = 0; in macb_init_rx_ring()
1571 struct macb *bp = queue->bp; in macb_rx()
1575 int first_frag = -1; in macb_rx()
1577 for (tail = queue->rx_tail; budget > 0; tail++) { in macb_rx()
1584 if (!(desc->addr & MACB_BIT(RX_USED))) in macb_rx()
1587 /* Ensure ctrl is at least as up-to-date as addr */ in macb_rx()
1590 ctrl = desc->ctrl; in macb_rx()
1593 if (first_frag != -1) in macb_rx()
1601 if (unlikely(first_frag == -1)) { in macb_rx()
1607 first_frag = -1; in macb_rx()
1614 budget--; in macb_rx()
1623 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1625 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1631 queue_writel(queue, RBQP, queue->rx_ring_dma); in macb_rx()
1635 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1639 if (first_frag != -1) in macb_rx()
1640 queue->rx_tail = first_frag; in macb_rx()
1642 queue->rx_tail = tail; in macb_rx()
1649 struct macb *bp = queue->bp; in macb_rx_pending()
1653 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in macb_rx_pending()
1659 return (desc->addr & MACB_BIT(RX_USED)) != 0; in macb_rx_pending()
1665 struct macb *bp = queue->bp; in macb_rx_poll()
1668 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_rx_poll()
1670 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n", in macb_rx_poll()
1671 (unsigned int)(queue - bp->queues), work_done, budget); in macb_rx_poll()
1674 queue_writel(queue, IER, bp->rx_intr_mask); in macb_rx_poll()
1680 * interrupts are re-enabled. in macb_rx_poll()
1687 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_rx_poll()
1688 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_rx_poll()
1690 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n"); in macb_rx_poll()
1702 struct macb *bp = queue->bp; in macb_tx_restart()
1706 spin_lock_irqsave(&queue->tx_ptr_lock, flags); in macb_tx_restart()
1708 if (queue->tx_head == queue->tx_tail) in macb_tx_restart()
1713 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); in macb_tx_restart()
1718 spin_lock(&bp->lock); in macb_tx_restart()
1720 spin_unlock(&bp->lock); in macb_tx_restart()
1723 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); in macb_tx_restart()
1731 spin_lock_irqsave(&queue->tx_ptr_lock, flags); in macb_tx_complete_pending()
1732 if (queue->tx_head != queue->tx_tail) { in macb_tx_complete_pending()
1736 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED)) in macb_tx_complete_pending()
1739 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); in macb_tx_complete_pending()
1746 struct macb *bp = queue->bp; in macb_tx_poll()
1752 if (queue->txubr_pending) { in macb_tx_poll()
1753 queue->txubr_pending = false; in macb_tx_poll()
1754 netdev_vdbg(bp->dev, "poll: tx restart\n"); in macb_tx_poll()
1758 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n", in macb_tx_poll()
1759 (unsigned int)(queue - bp->queues), work_done, budget); in macb_tx_poll()
1768 * interrupts are re-enabled. in macb_tx_poll()
1776 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_poll()
1778 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n"); in macb_tx_poll()
1789 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1794 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1795 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1806 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1812 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1814 bp->rx_intr_mask | in macb_hresp_error_task()
1828 struct macb *bp = queue->bp; in macb_wol_interrupt()
1836 spin_lock(&bp->lock); in macb_wol_interrupt()
1841 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1842 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1844 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1846 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1849 spin_unlock(&bp->lock); in macb_wol_interrupt()
1857 struct macb *bp = queue->bp; in gem_wol_interrupt()
1865 spin_lock(&bp->lock); in gem_wol_interrupt()
1870 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1871 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1873 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1875 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1878 spin_unlock(&bp->lock); in gem_wol_interrupt()
1886 struct macb *bp = queue->bp; in macb_interrupt()
1887 struct net_device *dev = bp->dev; in macb_interrupt()
1895 spin_lock(&bp->lock); in macb_interrupt()
1900 queue_writel(queue, IDR, -1); in macb_interrupt()
1901 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1902 queue_writel(queue, ISR, -1); in macb_interrupt()
1906 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1907 (unsigned int)(queue - bp->queues), in macb_interrupt()
1910 if (status & bp->rx_intr_mask) { in macb_interrupt()
1917 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1918 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1921 if (napi_schedule_prep(&queue->napi_rx)) { in macb_interrupt()
1922 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1923 __napi_schedule(&queue->napi_rx); in macb_interrupt()
1930 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1935 queue->txubr_pending = true; in macb_interrupt()
1939 if (napi_schedule_prep(&queue->napi_tx)) { in macb_interrupt()
1940 netdev_vdbg(bp->dev, "scheduling TX softirq\n"); in macb_interrupt()
1941 __napi_schedule(&queue->napi_tx); in macb_interrupt()
1947 schedule_work(&queue->tx_error_task); in macb_interrupt()
1949 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1956 * add that if/when we get our hands on a full-blown MII PHY. in macb_interrupt()
1961 * interrupts but it can be cleared by re-enabling RX. See in macb_interrupt()
1972 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1978 spin_lock(&bp->stats_lock); in macb_interrupt()
1980 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1982 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1983 spin_unlock(&bp->stats_lock); in macb_interrupt()
1985 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1990 queue_work(system_bh_wq, &bp->hresp_err_bh_work); in macb_interrupt()
1993 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1999 spin_unlock(&bp->lock); in macb_interrupt()
2005 /* Polling receive - used by netconsole and other diagnostic tools
2016 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
2017 macb_interrupt(dev->irq, queue); in macb_poll_controller()
2028 unsigned int len, entry, i, tx_head = queue->tx_head; in macb_tx_map()
2032 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; in macb_tx_map()
2037 if (skb_shinfo(skb)->gso_size != 0) { in macb_tx_map()
2038 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_tx_map()
2039 /* UDP - UFO */ in macb_tx_map()
2042 /* TCP - TSO */ in macb_tx_map()
2046 /* First, map non-paged data */ in macb_tx_map()
2055 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2057 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
2058 skb->data + offset, in macb_tx_map()
2060 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2064 tx_skb->skb = NULL; in macb_tx_map()
2065 tx_skb->mapping = mapping; in macb_tx_map()
2066 tx_skb->size = size; in macb_tx_map()
2067 tx_skb->mapped_as_page = false; in macb_tx_map()
2069 len -= size; in macb_tx_map()
2074 size = min(len, bp->max_tx_length); in macb_tx_map()
2079 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_tx_map()
2084 size = min(len, bp->max_tx_length); in macb_tx_map()
2086 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2088 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
2090 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2094 tx_skb->skb = NULL; in macb_tx_map()
2095 tx_skb->mapping = mapping; in macb_tx_map()
2096 tx_skb->size = size; in macb_tx_map()
2097 tx_skb->mapped_as_page = true; in macb_tx_map()
2099 len -= size; in macb_tx_map()
2108 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
2113 tx_skb->skb = skb; in macb_tx_map()
2126 desc->ctrl = ctrl; in macb_tx_map()
2131 mss_mfs = skb_shinfo(skb)->gso_size + in macb_tx_map()
2135 mss_mfs = skb_shinfo(skb)->gso_size; in macb_tx_map()
2144 i--; in macb_tx_map()
2146 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2149 ctrl = (u32)tx_skb->size; in macb_tx_map()
2154 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
2158 if (i == queue->tx_head) { in macb_tx_map()
2161 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2162 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && in macb_tx_map()
2172 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2173 /* desc->addr must be visible to hardware before clearing in macb_tx_map()
2174 * 'TX_USED' bit in desc->ctrl. in macb_tx_map()
2177 desc->ctrl = ctrl; in macb_tx_map()
2178 } while (i != queue->tx_head); in macb_tx_map()
2180 queue->tx_head = tx_head; in macb_tx_map()
2185 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2187 for (i = queue->tx_head; i != tx_head; i++) { in macb_tx_map()
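The descriptor chain in macb_tx_map() is published in two phases: every word except the first descriptor's TX_USED bit is written, then a barrier orders those writes before the final ctrl store that yields ownership (the wmb() visible a few lines up). A condensed sketch of that publish step for one descriptor:

/* Two-phase publish: the MAC must never observe a valid ctrl word
 * pointing at a half-initialised descriptor. */
static void sketch_publish(struct macb *bp, struct macb_dma_desc *desc,
                           dma_addr_t mapping, u32 ctrl_no_used)
{
        macb_set_addr(bp, desc, mapping); /* fill the address word(s) */

        /* desc->addr must be visible to hardware before TX_USED clears */
        wmb();

        desc->ctrl = ctrl_no_used;        /* ownership passes to the MAC */
}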
2206 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) in macb_features_check()
2216 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) in macb_features_check()
2219 nr_frags = skb_shinfo(skb)->nr_frags; in macb_features_check()
2221 nr_frags--; in macb_features_check()
2223 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_features_check()
2234 if (skb->ip_summed != CHECKSUM_PARTIAL) in macb_clear_csum()
2239 return -1; in macb_clear_csum()
2242 * This is required - at least for Zynq, which otherwise calculates in macb_clear_csum()
2245 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; in macb_clear_csum()
2253 int padlen = ETH_ZLEN - (*skb)->len; in macb_pad_and_fcs()
2258 if (!(ndev->features & NETIF_F_HW_CSUM) || in macb_pad_and_fcs()
2259 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || in macb_pad_and_fcs()
2260 skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb)) in macb_pad_and_fcs()
2278 return -ENOMEM; in macb_pad_and_fcs()
2285 skb_put_zero(*skb, padlen - ETH_FCS_LEN); in macb_pad_and_fcs()
2289 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
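When checksum offload cannot be used for a frame, macb_pad_and_fcs() pads to the minimum length and appends the FCS in software: CRC-32 seeded with ~0 (as in the fragment above), inverted, and stored least-significant byte first. A kernel-context sketch of the append step (the inversion and byte order mirror the driver; treat the details as illustrative):

#include <linux/crc32.h>
#include <linux/skbuff.h>

/* Append a software-computed Ethernet FCS to an already padded skb. */
static void sketch_append_fcs(struct sk_buff *skb)
{
        u32 fcs = crc32_le(~0, skb->data, skb->len);

        fcs = ~fcs;                          /* final inversion */
        skb_put_u8(skb, fcs & 0xff);         /* LSB first on the wire */
        skb_put_u8(skb, (fcs >> 8) & 0xff);
        skb_put_u8(skb, (fcs >> 16) & 0xff);
        skb_put_u8(skb, (fcs >> 24) & 0xff);
}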
2304 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2322 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in macb_start_xmit()
2323 (bp->hw_dma_cap & HW_DMA_CAP_PTP)) in macb_start_xmit()
2324 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in macb_start_xmit()
2327 is_lso = (skb_shinfo(skb)->gso_size != 0); in macb_start_xmit()
2331 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_start_xmit()
2337 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2342 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2345 netdev_vdbg(bp->dev, in macb_start_xmit()
2347 queue_index, skb->len, skb->head, skb->data, in macb_start_xmit()
2350 skb->data, 16, true); in macb_start_xmit()
2359 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2361 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2362 nr_frags = skb_shinfo(skb)->nr_frags; in macb_start_xmit()
2364 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); in macb_start_xmit()
2365 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2368 spin_lock_irqsave(&queue->tx_ptr_lock, flags); in macb_start_xmit()
2371 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, in macb_start_xmit()
2372 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2374 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2375 queue->tx_head, queue->tx_tail); in macb_start_xmit()
2389 netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index), in macb_start_xmit()
2390 skb->len); in macb_start_xmit()
2392 spin_lock(&bp->lock); in macb_start_xmit()
2394 spin_unlock(&bp->lock); in macb_start_xmit()
2396 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2400 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); in macb_start_xmit()
2408 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2410 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2412 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2413 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2416 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2417 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2421 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2422 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
2434 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2435 if (!queue->rx_skbuff) in gem_free_rx_buffers()
2438 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2439 skb = queue->rx_skbuff[i]; in gem_free_rx_buffers()
2447 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2453 kfree(queue->rx_skbuff); in gem_free_rx_buffers()
2454 queue->rx_skbuff = NULL; in gem_free_rx_buffers()
2460 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2462 if (queue->rx_buffers) { in macb_free_rx_buffers()
2463 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2464 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2465 queue->rx_buffers, queue->rx_buffers_dma); in macb_free_rx_buffers()
2466 queue->rx_buffers = NULL; in macb_free_rx_buffers()
2472 return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch; in macb_tx_ring_size_per_queue()
2477 return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch; in macb_rx_ring_size_per_queue()
2482 struct device *dev = &bp->pdev->dev; in macb_free_consistent()
2487 if (bp->rx_ring_tieoff) { in macb_free_consistent()
2489 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma); in macb_free_consistent()
2490 bp->rx_ring_tieoff = NULL; in macb_free_consistent()
2493 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2495 size = bp->num_queues * macb_tx_ring_size_per_queue(bp); in macb_free_consistent()
2496 dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma); in macb_free_consistent()
2498 size = bp->num_queues * macb_rx_ring_size_per_queue(bp); in macb_free_consistent()
2499 dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma); in macb_free_consistent()
2501 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2502 kfree(queue->tx_skb); in macb_free_consistent()
2503 queue->tx_skb = NULL; in macb_free_consistent()
2504 queue->tx_ring = NULL; in macb_free_consistent()
2505 queue->rx_ring = NULL; in macb_free_consistent()
2515 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2516 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2517 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); in gem_alloc_rx_buffers()
2518 if (!queue->rx_skbuff) in gem_alloc_rx_buffers()
2519 return -ENOMEM; in gem_alloc_rx_buffers()
2521 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2523 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2530 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2533 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2534 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2535 &queue->rx_buffers_dma, GFP_KERNEL); in macb_alloc_rx_buffers()
2536 if (!queue->rx_buffers) in macb_alloc_rx_buffers()
2537 return -ENOMEM; in macb_alloc_rx_buffers()
2539 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2541 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); in macb_alloc_rx_buffers()
2547 struct device *dev = &bp->pdev->dev; in macb_alloc_consistent()
2555 * Upper 32-bits of Tx/Rx DMA descriptor for each queue must match! in macb_alloc_consistent()
2561 size = bp->num_queues * macb_tx_ring_size_per_queue(bp); in macb_alloc_consistent()
2563 if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1)) in macb_alloc_consistent()
2565 netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n", in macb_alloc_consistent()
2566 size, bp->num_queues, (unsigned long)tx_dma, tx); in macb_alloc_consistent()
2568 size = bp->num_queues * macb_rx_ring_size_per_queue(bp); in macb_alloc_consistent()
2570 if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1)) in macb_alloc_consistent()
2572 netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n", in macb_alloc_consistent()
2573 size, bp->num_queues, (unsigned long)rx_dma, rx); in macb_alloc_consistent()
2575 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2576 queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q; in macb_alloc_consistent()
2577 queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q; in macb_alloc_consistent()
2579 queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q; in macb_alloc_consistent()
2580 queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q; in macb_alloc_consistent()
2582 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2583 queue->tx_skb = kmalloc(size, GFP_KERNEL); in macb_alloc_consistent()
2584 if (!queue->tx_skb) in macb_alloc_consistent()
2587 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2591 if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) { in macb_alloc_consistent()
2592 bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev, in macb_alloc_consistent()
2594 &bp->rx_ring_tieoff_dma, in macb_alloc_consistent()
2596 if (!bp->rx_ring_tieoff) in macb_alloc_consistent()
2604 return -ENOMEM; in macb_alloc_consistent()
2609 struct macb_dma_desc *desc = bp->rx_ring_tieoff; in macb_init_tieoff()
2611 if (bp->caps & MACB_CAPS_QUEUE_DISABLE) in macb_init_tieoff()
2617 desc->ctrl = 0; in macb_init_tieoff()
2627 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2628 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2631 desc->ctrl = MACB_BIT(TX_USED); in gem_init_rings()
2633 desc->ctrl |= MACB_BIT(TX_WRAP); in gem_init_rings()
2634 queue->tx_head = 0; in gem_init_rings()
2635 queue->tx_tail = 0; in gem_init_rings()
2637 queue->rx_tail = 0; in gem_init_rings()
2638 queue->rx_prepared_head = 0; in gem_init_rings()
2651 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2653 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2654 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2656 desc->ctrl = MACB_BIT(TX_USED); in macb_init_rings()
2658 bp->queues[0].tx_head = 0; in macb_init_rings()
2659 bp->queues[0].tx_tail = 0; in macb_init_rings()
2660 desc->ctrl |= MACB_BIT(TX_WRAP); in macb_init_rings()
2682 macb_writel(bp, TSR, -1); in macb_reset_hw()
2683 macb_writel(bp, RSR, -1); in macb_reset_hw()
2685 /* Disable RX partial store and forward and reset watermark value */ in macb_reset_hw()
2689 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2690 queue_writel(queue, IDR, -1); in macb_reset_hw()
2692 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2693 queue_writel(queue, ISR, -1); in macb_reset_hw()
2700 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2730 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
2764 * - use the correct receive buffer size
2765 * - set best burst length for DMA operations
2767 * - set both rx/tx packet buffers to full memory size
2777 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2779 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2780 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2786 if (bp->dma_burst_length) in macb_configure_dma()
2787 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2788 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); in macb_configure_dma()
2791 if (bp->native_io) in macb_configure_dma()
2796 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2803 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2807 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2810 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2826 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2830 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2832 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2834 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2838 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2839 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2840 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2841 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2842 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2847 if (bp->rx_watermark) in macb_init_hw()
2848 gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU))); in macb_init_hw()
2907 /* Add multicast addresses to the internal multicast-hash table. */
2919 bitnr = hash_get_index(ha->addr); in macb_sethashtable()
2935 if (dev->flags & IFF_PROMISC) { in macb_set_rx_mode()
2947 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2951 if (dev->flags & IFF_ALLMULTI) { in macb_set_rx_mode()
2953 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2954 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2960 } else if (dev->flags & (~IFF_ALLMULTI)) { in macb_set_rx_mode()
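The hash table behind HRB/HRT is indexed by a 6-bit fold of the 48-bit multicast address: bit j of the index is the XOR of address bits j, j+6, ..., j+42. A standalone sketch of the scheme hash_get_index() implements:

#include <stdint.h>
#include <stdio.h>

/* 6-bit multicast hash: fold the 48-bit MAC down by XOR-ing every
 * sixth bit; the result selects one bit across the HRB/HRT pair. */
static unsigned int hash_index(const uint8_t addr[6])
{
        unsigned int index = 0;

        for (int j = 0; j < 6; j++) {
                int bit = 0;

                for (int i = 0; i < 8; i++) {
                        int n = i * 6 + j;              /* address bit number */

                        bit ^= (addr[n / 8] >> (n % 8)) & 1;
                }
                index |= (unsigned int)bit << j;
        }
        return index;                                    /* 0..63 */
}

int main(void)
{
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

        printf("hash index = %u\n", hash_index(mc));
        return 0;
}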
2972 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; in macb_open()
2978 netdev_dbg(bp->dev, "open\n"); in macb_open()
2980 err = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_open()
2994 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
2995 napi_enable(&queue->napi_rx); in macb_open()
2996 napi_enable(&queue->napi_tx); in macb_open()
3001 err = phy_power_on(bp->sgmii_phy); in macb_open()
3011 if (bp->ptp_info) in macb_open()
3012 bp->ptp_info->ptp_init(dev); in macb_open()
3017 phy_power_off(bp->sgmii_phy); in macb_open()
3021 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
3022 napi_disable(&queue->napi_rx); in macb_open()
3023 napi_disable(&queue->napi_tx); in macb_open()
3027 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
3040 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_close()
3041 napi_disable(&queue->napi_rx); in macb_close()
3042 napi_disable(&queue->napi_tx); in macb_close()
3046 phylink_stop(bp->phylink); in macb_close()
3047 phylink_disconnect_phy(bp->phylink); in macb_close()
3049 phy_power_off(bp->sgmii_phy); in macb_close()
3051 spin_lock_irqsave(&bp->lock, flags); in macb_close()
3054 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
3058 if (bp->ptp_info) in macb_close()
3059 bp->ptp_info->ptp_remove(dev); in macb_close()
3061 pm_runtime_put(&bp->pdev->dev); in macb_close()
3069 return -EBUSY; in macb_change_mtu()
3071 WRITE_ONCE(dev->mtu, new_mtu); in macb_change_mtu()
3094 u64 *p = &bp->hw_stats.gem.tx_octets; in gem_update_stats()
3098 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
3100 bp->ethtool_stats[i] += val; in gem_update_stats()
3105 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
3106 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
3112 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
3113 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) in gem_update_stats()
3114 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
3119 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
3121 spin_lock_irq(&bp->stats_lock); in gem_get_stats()
3122 if (netif_running(bp->dev)) in gem_get_stats()
3125 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + in gem_get_stats()
3126 hwstat->rx_alignment_errors + in gem_get_stats()
3127 hwstat->rx_resource_errors + in gem_get_stats()
3128 hwstat->rx_overruns + in gem_get_stats()
3129 hwstat->rx_oversize_frames + in gem_get_stats()
3130 hwstat->rx_jabbers + in gem_get_stats()
3131 hwstat->rx_undersized_frames + in gem_get_stats()
3132 hwstat->rx_length_field_frame_errors); in gem_get_stats()
3133 nstat->tx_errors = (hwstat->tx_late_collisions + in gem_get_stats()
3134 hwstat->tx_excessive_collisions + in gem_get_stats()
3135 hwstat->tx_underrun + in gem_get_stats()
3136 hwstat->tx_carrier_sense_errors); in gem_get_stats()
3137 nstat->multicast = hwstat->rx_multicast_frames; in gem_get_stats()
3138 nstat->collisions = (hwstat->tx_single_collision_frames + in gem_get_stats()
3139 hwstat->tx_multiple_collision_frames + in gem_get_stats()
3140 hwstat->tx_excessive_collisions); in gem_get_stats()
3141 nstat->rx_length_errors = (hwstat->rx_oversize_frames + in gem_get_stats()
3142 hwstat->rx_jabbers + in gem_get_stats()
3143 hwstat->rx_undersized_frames + in gem_get_stats()
3144 hwstat->rx_length_field_frame_errors); in gem_get_stats()
3145 nstat->rx_over_errors = hwstat->rx_resource_errors; in gem_get_stats()
3146 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; in gem_get_stats()
3147 nstat->rx_frame_errors = hwstat->rx_alignment_errors; in gem_get_stats()
3148 nstat->rx_fifo_errors = hwstat->rx_overruns; in gem_get_stats()
3149 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; in gem_get_stats()
3150 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; in gem_get_stats()
3151 nstat->tx_fifo_errors = hwstat->tx_underrun; in gem_get_stats()
3152 spin_unlock_irq(&bp->stats_lock); in gem_get_stats()
3160 spin_lock_irq(&bp->stats_lock); in gem_get_ethtool_stats()
3162 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
3164 spin_unlock_irq(&bp->stats_lock); in gem_get_ethtool_stats()
3173 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
3175 return -EOPNOTSUPP; in gem_get_sset_count()
3193 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
3208 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
3210 netdev_stats_to_stats64(nstat, &bp->dev->stats); in macb_get_stats()
3217 spin_lock_irq(&bp->stats_lock); in macb_get_stats()
3221 nstat->rx_errors = (hwstat->rx_fcs_errors + in macb_get_stats()
3222 hwstat->rx_align_errors + in macb_get_stats()
3223 hwstat->rx_resource_errors + in macb_get_stats()
3224 hwstat->rx_overruns + in macb_get_stats()
3225 hwstat->rx_oversize_pkts + in macb_get_stats()
3226 hwstat->rx_jabbers + in macb_get_stats()
3227 hwstat->rx_undersize_pkts + in macb_get_stats()
3228 hwstat->rx_length_mismatch); in macb_get_stats()
3229 nstat->tx_errors = (hwstat->tx_late_cols + in macb_get_stats()
3230 hwstat->tx_excessive_cols + in macb_get_stats()
3231 hwstat->tx_underruns + in macb_get_stats()
3232 hwstat->tx_carrier_errors + in macb_get_stats()
3233 hwstat->sqe_test_errors); in macb_get_stats()
3234 nstat->collisions = (hwstat->tx_single_cols + in macb_get_stats()
3235 hwstat->tx_multiple_cols + in macb_get_stats()
3236 hwstat->tx_excessive_cols); in macb_get_stats()
3237 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + in macb_get_stats()
3238 hwstat->rx_jabbers + in macb_get_stats()
3239 hwstat->rx_undersize_pkts + in macb_get_stats()
3240 hwstat->rx_length_mismatch); in macb_get_stats()
3241 nstat->rx_over_errors = hwstat->rx_resource_errors + in macb_get_stats()
3242 hwstat->rx_overruns; in macb_get_stats()
3243 nstat->rx_crc_errors = hwstat->rx_fcs_errors; in macb_get_stats()
3244 nstat->rx_frame_errors = hwstat->rx_align_errors; in macb_get_stats()
3245 nstat->rx_fifo_errors = hwstat->rx_overruns; in macb_get_stats()
3247 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; in macb_get_stats()
3248 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; in macb_get_stats()
3249 nstat->tx_fifo_errors = hwstat->tx_underruns; in macb_get_stats()
3251 spin_unlock_irq(&bp->stats_lock); in macb_get_stats()
3258 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_pause_stats()
3260 spin_lock_irq(&bp->stats_lock); in macb_get_pause_stats()
3262 pause_stats->tx_pause_frames = hwstat->tx_pause_frames; in macb_get_pause_stats()
3263 pause_stats->rx_pause_frames = hwstat->rx_pause_frames; in macb_get_pause_stats()
3264 spin_unlock_irq(&bp->stats_lock); in macb_get_pause_stats()
3271 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_pause_stats()
3273 spin_lock_irq(&bp->stats_lock); in gem_get_pause_stats()
3275 pause_stats->tx_pause_frames = hwstat->tx_pause_frames; in gem_get_pause_stats()
3276 pause_stats->rx_pause_frames = hwstat->rx_pause_frames; in gem_get_pause_stats()
3277 spin_unlock_irq(&bp->stats_lock); in gem_get_pause_stats()
3284 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_eth_mac_stats()
3286 spin_lock_irq(&bp->stats_lock); in macb_get_eth_mac_stats()
3288 mac_stats->FramesTransmittedOK = hwstat->tx_ok; in macb_get_eth_mac_stats()
3289 mac_stats->SingleCollisionFrames = hwstat->tx_single_cols; in macb_get_eth_mac_stats()
3290 mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols; in macb_get_eth_mac_stats()
3291 mac_stats->FramesReceivedOK = hwstat->rx_ok; in macb_get_eth_mac_stats()
3292 mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors; in macb_get_eth_mac_stats()
3293 mac_stats->AlignmentErrors = hwstat->rx_align_errors; in macb_get_eth_mac_stats()
3294 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred; in macb_get_eth_mac_stats()
3295 mac_stats->LateCollisions = hwstat->tx_late_cols; in macb_get_eth_mac_stats()
3296 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols; in macb_get_eth_mac_stats()
3297 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns; in macb_get_eth_mac_stats()
3298 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors; in macb_get_eth_mac_stats()
3299 mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns; in macb_get_eth_mac_stats()
3300 mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch; in macb_get_eth_mac_stats()
3301 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts; in macb_get_eth_mac_stats()
3302 spin_unlock_irq(&bp->stats_lock); in macb_get_eth_mac_stats()
3309 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_eth_mac_stats()
3311 spin_lock_irq(&bp->stats_lock); in gem_get_eth_mac_stats()
3313 mac_stats->FramesTransmittedOK = hwstat->tx_frames; in gem_get_eth_mac_stats()
3314 mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames; in gem_get_eth_mac_stats()
3315 mac_stats->MultipleCollisionFrames = in gem_get_eth_mac_stats()
3316 hwstat->tx_multiple_collision_frames; in gem_get_eth_mac_stats()
3317 mac_stats->FramesReceivedOK = hwstat->rx_frames; in gem_get_eth_mac_stats()
3318 mac_stats->FrameCheckSequenceErrors = in gem_get_eth_mac_stats()
3319 hwstat->rx_frame_check_sequence_errors; in gem_get_eth_mac_stats()
3320 mac_stats->AlignmentErrors = hwstat->rx_alignment_errors; in gem_get_eth_mac_stats()
3321 mac_stats->OctetsTransmittedOK = hwstat->tx_octets; in gem_get_eth_mac_stats()
3322 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames; in gem_get_eth_mac_stats()
3323 mac_stats->LateCollisions = hwstat->tx_late_collisions; in gem_get_eth_mac_stats()
3324 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions; in gem_get_eth_mac_stats()
3325 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun; in gem_get_eth_mac_stats()
3326 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors; in gem_get_eth_mac_stats()
3327 mac_stats->OctetsReceivedOK = hwstat->rx_octets; in gem_get_eth_mac_stats()
3328 mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames; in gem_get_eth_mac_stats()
3329 mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames; in gem_get_eth_mac_stats()
3330 mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames; in gem_get_eth_mac_stats()
3331 mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames; in gem_get_eth_mac_stats()
3332 mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors; in gem_get_eth_mac_stats()
3333 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames; in gem_get_eth_mac_stats()
3334 spin_unlock_irq(&bp->stats_lock); in gem_get_eth_mac_stats()
3342 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_eth_phy_stats()
3344 spin_lock_irq(&bp->stats_lock); in macb_get_eth_phy_stats()
3346 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors; in macb_get_eth_phy_stats()
3347 spin_unlock_irq(&bp->stats_lock); in macb_get_eth_phy_stats()
3354 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_eth_phy_stats()
3356 spin_lock_irq(&bp->stats_lock); in gem_get_eth_phy_stats()
3358 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors; in gem_get_eth_phy_stats()
3359 spin_unlock_irq(&bp->stats_lock); in gem_get_eth_phy_stats()
3367 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_rmon_stats()
3369 spin_lock_irq(&bp->stats_lock); in macb_get_rmon_stats()
3371 rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts; in macb_get_rmon_stats()
3372 rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts; in macb_get_rmon_stats()
3373 rmon_stats->jabbers = hwstat->rx_jabbers; in macb_get_rmon_stats()
3374 spin_unlock_irq(&bp->stats_lock); in macb_get_rmon_stats()
3393 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_rmon_stats()
3395 spin_lock_irq(&bp->stats_lock); in gem_get_rmon_stats()
3397 rmon_stats->undersize_pkts = hwstat->rx_undersized_frames; in gem_get_rmon_stats()
3398 rmon_stats->oversize_pkts = hwstat->rx_oversize_frames; in gem_get_rmon_stats()
3399 rmon_stats->jabbers = hwstat->rx_jabbers; in gem_get_rmon_stats()
3400 rmon_stats->hist[0] = hwstat->rx_64_byte_frames; in gem_get_rmon_stats()
3401 rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames; in gem_get_rmon_stats()
3402 rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames; in gem_get_rmon_stats()
3403 rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames; in gem_get_rmon_stats()
3404 rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames; in gem_get_rmon_stats()
3405 rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames; in gem_get_rmon_stats()
3406 rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames; in gem_get_rmon_stats()
3407 rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames; in gem_get_rmon_stats()
3408 rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames; in gem_get_rmon_stats()
3409 rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames; in gem_get_rmon_stats()
3410 rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames; in gem_get_rmon_stats()
3411 rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames; in gem_get_rmon_stats()
3412 rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames; in gem_get_rmon_stats()
3413 rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames; in gem_get_rmon_stats()
3414 spin_unlock_irq(&bp->stats_lock); in gem_get_rmon_stats()
3430 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3433 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3434 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3447 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3448 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3450 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3460 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3461 wol->supported |= (WAKE_MAGIC | WAKE_ARP); in macb_get_wol()
3464 wol->wolopts |= bp->wolopts; in macb_get_wol()
3473 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3475 if (ret && ret != -EOPNOTSUPP) in macb_set_wol()
3478 bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0; in macb_set_wol()
3479 bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0; in macb_set_wol()
3480 bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0; in macb_set_wol()
3482 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); in macb_set_wol()
3492 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3500 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3510 ring->rx_max_pending = MAX_RX_RING_SIZE; in macb_get_ringparam()
3511 ring->tx_max_pending = MAX_TX_RING_SIZE; in macb_get_ringparam()
3513 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3514 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3524 unsigned int reset = 0; in macb_set_ringparam()
3526 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in macb_set_ringparam()
3527 return -EINVAL; in macb_set_ringparam()
3529 new_rx_size = clamp_t(u32, ring->rx_pending, in macb_set_ringparam()
3533 new_tx_size = clamp_t(u32, ring->tx_pending, in macb_set_ringparam()
3537 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3538 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3543 if (netif_running(bp->dev)) { in macb_set_ringparam()
3544 reset = 1; in macb_set_ringparam()
3545 macb_close(bp->dev); in macb_set_ringparam()
3548 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3549 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3551 if (reset) in macb_set_ringparam()
3552 macb_open(bp->dev); in macb_set_ringparam()
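macb_set_ringparam() clamps the requested counts into the supported range; the full source also rounds them up to a power of two before they replace bp->rx_ring_size/bp->tx_ring_size (an assumption here, as those lines are elided from this listing). A plain-C sketch of that normalization:

/* Clamp to [lo, hi], then round up to a power of two; lo and hi stand in
 * for the MIN_ and MAX_ ring-size limits. */
static unsigned int normalize_ring_size(unsigned int req,
					unsigned int lo, unsigned int hi)
{
	unsigned int v = req < lo ? lo : (req > hi ? hi : req);
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

From userspace this corresponds to "ethtool -G <dev> rx <n> tx <n>"; note the macb_close()/macb_open() bounce above when the interface is running.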
3563 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3567 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3568 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3571 return -ENOTSUPP; in gem_get_tsu_rate()
3585 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3590 info->so_timestamping = in gem_get_ts_info()
3595 info->tx_types = in gem_get_ts_info()
3599 info->rx_filters = in gem_get_ts_info()
3603 if (bp->ptp_clock) in gem_get_ts_info()
3604 info->phc_index = ptp_clock_index(bp->ptp_clock); in gem_get_ts_info()
3625 if (bp->ptp_info) in macb_get_ts_info()
3626 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3633 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3638 if (!(netdev->features & NETIF_F_NTUPLE)) in gem_enable_flow_filters()
3643 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3644 struct ethtool_rx_flow_spec *fs = &item->fs; in gem_enable_flow_filters()
3647 if (fs->location >= num_t2_scr) in gem_enable_flow_filters()
3650 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3656 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_enable_flow_filters()
3658 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) in gem_enable_flow_filters()
3663 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) in gem_enable_flow_filters()
3668 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) in gem_enable_flow_filters()
3673 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3680 uint16_t index = fs->location; in gem_prog_cmp_regs()
3689 tp4sp_v = &(fs->h_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3690 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3693 if (tp4sp_m->ip4src == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3694 /* 1st compare reg - IP source address */ in gem_prog_cmp_regs()
3697 w0 = tp4sp_v->ip4src; in gem_prog_cmp_regs()
3698 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3707 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3708 /* 2nd compare reg - IP destination address */ in gem_prog_cmp_regs()
3711 w0 = tp4sp_v->ip4dst; in gem_prog_cmp_regs()
3712 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3721 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { in gem_prog_cmp_regs()
3722 /* 3rd compare reg - source port, destination port */ in gem_prog_cmp_regs()
3726 if (tp4sp_m->psrc == tp4sp_m->pdst) { in gem_prog_cmp_regs()
3727 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3728 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3729 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3733 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ in gem_prog_cmp_regs()
3735 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ in gem_prog_cmp_regs()
3736 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3739 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3749 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); in gem_prog_cmp_regs()
3764 struct ethtool_rx_flow_spec *fs = &cmd->fs; in gem_add_flow_filter()
3767 int ret = -EINVAL; in gem_add_flow_filter()
3772 return -ENOMEM; in gem_add_flow_filter()
3773 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); in gem_add_flow_filter()
3777 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_add_flow_filter()
3778 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_add_flow_filter()
3779 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_add_flow_filter()
3780 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), in gem_add_flow_filter()
3781 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); in gem_add_flow_filter()
3783 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3786 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3787 if (item->fs.location > newfs->fs.location) { in gem_add_flow_filter()
3788 list_add_tail(&newfs->list, &item->list); in gem_add_flow_filter()
3791 } else if (item->fs.location == fs->location) { in gem_add_flow_filter()
3793 fs->location); in gem_add_flow_filter()
3794 ret = -EBUSY; in gem_add_flow_filter()
3799 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3802 bp->rx_fs_list.count++; in gem_add_flow_filter()
3806 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3810 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
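gem_add_flow_filter() keeps bp->rx_fs_list sorted by location and rejects a duplicate slot with -EBUSY; entries arrive through the ETHTOOL_SRXCLSRLINS ioctl. A userspace sketch inserting a TCP/IPv4 destination-address rule steered to queue 1 (interface name and address are assumptions):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.location = 0;				/* screener slot */
	nfc.fs.ring_cookie = 1;				/* RX queue 1 */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xFFFFFFFF;	/* exact match */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&nfc;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	close(fd);
	return 0;
}

Equivalent to "ethtool -N eth0 flow-type tcp4 dst-ip 192.0.2.1 action 1 loc 0" with NETIF_F_NTUPLE enabled; the all-ones mask checks above suggest only fully-specified fields are programmed into the compare registers.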
3823 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3825 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3826 if (item->fs.location == cmd->fs.location) { in gem_del_flow_filter()
3828 fs = &(item->fs); in gem_del_flow_filter()
3831 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_del_flow_filter()
3832 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_del_flow_filter()
3833 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_del_flow_filter()
3834 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), in gem_del_flow_filter()
3835 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); in gem_del_flow_filter()
3837 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3839 list_del(&item->list); in gem_del_flow_filter()
3840 bp->rx_fs_list.count--; in gem_del_flow_filter()
3841 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3847 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3848 return -EINVAL; in gem_del_flow_filter()
3857 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3858 if (item->fs.location == cmd->fs.location) { in gem_get_flow_entry()
3859 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); in gem_get_flow_entry()
3863 return -EINVAL; in gem_get_flow_entry()
3873 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3874 if (cnt == cmd->rule_cnt) in gem_get_all_flow_entries()
3875 return -EMSGSIZE; in gem_get_all_flow_entries()
3876 rule_locs[cnt] = item->fs.location; in gem_get_all_flow_entries()
3879 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3880 cmd->rule_cnt = cnt; in gem_get_all_flow_entries()
3891 switch (cmd->cmd) { in gem_get_rxnfc()
3893 cmd->data = bp->num_queues; in gem_get_rxnfc()
3896 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3906 "Command parameter %d is not supported\n", cmd->cmd); in gem_get_rxnfc()
3907 ret = -EOPNOTSUPP; in gem_get_rxnfc()
3918 switch (cmd->cmd) { in gem_set_rxnfc()
3920 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3921 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3922 ret = -EINVAL; in gem_set_rxnfc()
3932 "Command parameter %d is not supported\n", cmd->cmd); in gem_set_rxnfc()
3933 ret = -EOPNOTSUPP; in gem_set_rxnfc()
3983 return -EINVAL; in macb_ioctl()
3985 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3994 return -EINVAL; in macb_hwtstamp_get()
3996 if (!bp->ptp_info) in macb_hwtstamp_get()
3997 return -EOPNOTSUPP; in macb_hwtstamp_get()
3999 return bp->ptp_info->get_hwtst(dev, cfg); in macb_hwtstamp_get()
4009 return -EINVAL; in macb_hwtstamp_set()
4011 if (!bp->ptp_info) in macb_hwtstamp_set()
4012 return -EOPNOTSUPP; in macb_hwtstamp_set()
4014 return bp->ptp_info->set_hwtst(dev, cfg, extack); in macb_hwtstamp_set()
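Both hwtstamp hooks delegate through bp->ptp_info and return -EOPNOTSUPP when the GEM has no PTP block. Userspace reaches set_hwtst via SIOCSHWTSTAMP; a minimal sketch (interface name assumed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	cfg.tx_type = HWTSTAMP_TX_ON;		/* timestamp all TX */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* timestamp all RX */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&cfg;
	if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}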
4037 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
4044 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) in macb_set_rxcsum_feature()
4065 netdev_features_t changed = features ^ netdev->features; in macb_set_features()
4084 struct net_device *netdev = bp->dev; in macb_restore_features()
4085 netdev_features_t features = netdev->features; in macb_restore_features()
4095 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
4096 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
4104 u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time; in macb_taprio_setup_replace()
4114 if (conf->num_entries > bp->num_queues) { in macb_taprio_setup_replace()
4116 conf->num_entries, bp->num_queues); in macb_taprio_setup_replace()
4117 return -EINVAL; in macb_taprio_setup_replace()
4120 if (conf->base_time < 0) { in macb_taprio_setup_replace()
4122 conf->base_time); in macb_taprio_setup_replace()
4123 return -ERANGE; in macb_taprio_setup_replace()
4127 err = phylink_ethtool_ksettings_get(bp->phylink, &kset); in macb_taprio_setup_replace()
4136 return -EINVAL; in macb_taprio_setup_replace()
4139 enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL); in macb_taprio_setup_replace()
4141 return -ENOMEM; in macb_taprio_setup_replace()
4143 /* Pre-validate all entries before making any hardware changes */ in macb_taprio_setup_replace()
4144 for (i = 0; i < conf->num_entries; i++) { in macb_taprio_setup_replace()
4145 entry = &conf->entries[i]; in macb_taprio_setup_replace()
4147 if (entry->command != TC_TAPRIO_CMD_SET_GATES) { in macb_taprio_setup_replace()
4149 i, entry->command); in macb_taprio_setup_replace()
4150 err = -EOPNOTSUPP; in macb_taprio_setup_replace()
4155 if (!is_power_of_2(entry->gate_mask)) { in macb_taprio_setup_replace()
4157 i, entry->gate_mask); in macb_taprio_setup_replace()
4158 err = -EINVAL; in macb_taprio_setup_replace()
4163 if (entry->gate_mask & ~bp->queue_mask) { in macb_taprio_setup_replace()
4165 i, entry->gate_mask, bp->num_queues); in macb_taprio_setup_replace()
4166 err = -EINVAL; in macb_taprio_setup_replace()
4173 if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) { in macb_taprio_setup_replace()
4176 err = -ERANGE; in macb_taprio_setup_replace()
4181 if (entry->interval > enst_max_hw_interval(speed)) { in macb_taprio_setup_replace()
4183 i, entry->interval, enst_max_hw_interval(speed)); in macb_taprio_setup_replace()
4184 err = -ERANGE; in macb_taprio_setup_replace()
4189 if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) { in macb_taprio_setup_replace()
4191 i, conf->cycle_time - entry->interval, in macb_taprio_setup_replace()
4193 err = -ERANGE; in macb_taprio_setup_replace()
4197 enst_queue[i].queue_id = order_base_2(entry->gate_mask); in macb_taprio_setup_replace()
4202 enst_ns_to_hw_units(entry->interval, speed); in macb_taprio_setup_replace()
4204 enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed); in macb_taprio_setup_replace()
4206 configured_queues |= entry->gate_mask; in macb_taprio_setup_replace()
4207 total_on_time += entry->interval; in macb_taprio_setup_replace()
4208 start_time += entry->interval; in macb_taprio_setup_replace()
4212 if (total_on_time > conf->cycle_time) { in macb_taprio_setup_replace()
4214 total_on_time, conf->cycle_time); in macb_taprio_setup_replace()
4215 err = -EINVAL; in macb_taprio_setup_replace()
4220 conf->num_entries, conf->base_time, conf->cycle_time); in macb_taprio_setup_replace()
4222 /* All validations passed - proceed with hardware configuration */ in macb_taprio_setup_replace()
4223 scoped_guard(spinlock_irqsave, &bp->lock) { in macb_taprio_setup_replace()
4226 bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); in macb_taprio_setup_replace()
4228 for (i = 0; i < conf->num_entries; i++) { in macb_taprio_setup_replace()
4229 queue = &bp->queues[enst_queue[i].queue_id]; in macb_taprio_setup_replace()
4244 conf->num_entries, hweight32(configured_queues)); in macb_taprio_setup_replace()
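Each taprio entry must open exactly one existing hardware queue (its gate_mask a power of two within bp->queue_mask), and both the on time and the remainder of the cycle must fit the speed-dependent ENST interval limit. A condensed sketch of those per-entry checks:

#include <errno.h>

/* Per-entry validation mirroring the loop above; queue_mask and
 * max_interval stand in for bp->queue_mask and enst_max_hw_interval(). */
static int check_taprio_entry(unsigned int gate_mask,
			      unsigned long long interval,
			      unsigned long long cycle_time,
			      unsigned int queue_mask,
			      unsigned long long max_interval)
{
	if (!gate_mask || (gate_mask & (gate_mask - 1)))
		return -EINVAL;		/* exactly one queue per entry */
	if (gate_mask & ~queue_mask)
		return -EINVAL;		/* queue must exist on this MAC */
	if (interval > max_interval)
		return -ERANGE;		/* ON time fits the hardware field */
	if (cycle_time - interval > max_interval)
		return -ERANGE;		/* OFF time fits as well */
	return 0;
}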
4259 enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET; in macb_taprio_destroy()
4261 scoped_guard(spinlock_irqsave, &bp->lock) { in macb_taprio_destroy()
4266 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_taprio_destroy()
4281 if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC))) in macb_setup_taprio()
4282 return -EOPNOTSUPP; in macb_setup_taprio()
4285 if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) { in macb_setup_taprio()
4287 return -EOPNOTSUPP; in macb_setup_taprio()
4290 switch (taprio->cmd) { in macb_setup_taprio()
4298 err = -EOPNOTSUPP; in macb_setup_taprio()
4308 return -EINVAL; in macb_setup_tc()
4314 return -EOPNOTSUPP; in macb_setup_tc()
4344 struct device_node *np = bp->pdev->dev.of_node; in macb_configure_caps()
4348 refclk_ext = of_property_read_bool(np, "cdns,refclk-ext"); in macb_configure_caps()
4351 bp->caps = dt_conf->caps; in macb_configure_caps()
4353 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
4354 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
4358 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
4360 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
4363 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
4366 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
4369 dev_err(&bp->pdev->dev, in macb_configure_caps()
4373 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
4374 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
4381 bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN; in macb_configure_caps()
4383 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
4429 pdata = dev_get_platdata(&pdev->dev); in macb_clk_init()
4431 *pclk = pdata->pclk; in macb_clk_init()
4432 *hclk = pdata->hclk; in macb_clk_init()
4434 *pclk = devm_clk_get(&pdev->dev, "pclk"); in macb_clk_init()
4435 *hclk = devm_clk_get(&pdev->dev, "hclk"); in macb_clk_init()
4439 return dev_err_probe(&pdev->dev, in macb_clk_init()
4440 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, in macb_clk_init()
4444 return dev_err_probe(&pdev->dev, in macb_clk_init()
4445 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, in macb_clk_init()
4448 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); in macb_clk_init()
4452 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); in macb_clk_init()
4456 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); in macb_clk_init()
4462 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in macb_clk_init()
4468 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); in macb_clk_init()
4474 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in macb_clk_init()
4480 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); in macb_clk_init()
4486 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); in macb_clk_init()
4516 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
4517 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
4524 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
4527 queue = &bp->queues[q]; in macb_init()
4528 queue->bp = bp; in macb_init()
4529 spin_lock_init(&queue->tx_ptr_lock); in macb_init()
4530 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll); in macb_init()
4531 netif_napi_add(dev, &queue->napi_tx, macb_tx_poll); in macb_init()
4533 queue->ISR = GEM_ISR(hw_q - 1); in macb_init()
4534 queue->IER = GEM_IER(hw_q - 1); in macb_init()
4535 queue->IDR = GEM_IDR(hw_q - 1); in macb_init()
4536 queue->IMR = GEM_IMR(hw_q - 1); in macb_init()
4537 queue->TBQP = GEM_TBQP(hw_q - 1); in macb_init()
4538 queue->RBQP = GEM_RBQP(hw_q - 1); in macb_init()
4539 queue->RBQS = GEM_RBQS(hw_q - 1); in macb_init()
4542 queue->ISR = MACB_ISR; in macb_init()
4543 queue->IER = MACB_IER; in macb_init()
4544 queue->IDR = MACB_IDR; in macb_init()
4545 queue->IMR = MACB_IMR; in macb_init()
4546 queue->TBQP = MACB_TBQP; in macb_init()
4547 queue->RBQP = MACB_RBQP; in macb_init()
4550 queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q); in macb_init()
4551 queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q); in macb_init()
4552 queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q); in macb_init()
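		/* queue 0 is driven through the legacy MACB_* register block,
		 * while queues 1..N-1 each have their own GEM bank, hence the
		 * GEM offsets above being indexed with hw_q - 1. The ENST
		 * gate-time registers, by contrast, are indexed directly by
		 * hw_q.
		 */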
4559 queue->irq = platform_get_irq(pdev, q); in macb_init()
4560 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, in macb_init()
4561 IRQF_SHARED, dev->name, queue); in macb_init()
4563 dev_err(&pdev->dev, in macb_init()
4565 queue->irq, err); in macb_init()
4569 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); in macb_init()
4573 dev->netdev_ops = &macb_netdev_ops; in macb_init()
4577 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
4578 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
4579 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
4580 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
4581 dev->ethtool_ops = &gem_ethtool_ops; in macb_init()
4583 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
4584 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
4585 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
4586 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
4587 dev->ethtool_ops = &macb_ethtool_ops; in macb_init()
4592 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in macb_init()
4595 dev->hw_features = NETIF_F_SG; in macb_init()
4599 dev->hw_features |= MACB_NETIF_LSO; in macb_init()
4602 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
4603 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; in macb_init()
4604 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
4605 dev->hw_features &= ~NETIF_F_SG; in macb_init()
4607 if (bp->caps & MACB_CAPS_QBV) in macb_init()
4608 dev->hw_features |= NETIF_F_HW_TC; in macb_init()
4610 dev->features = dev->hw_features; in macb_init()
4614 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs in macb_init()
4617 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
4619 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
4620 if (bp->max_tuples > 0) { in macb_init()
4628 dev->hw_features |= NETIF_F_NTUPLE; in macb_init()
4630 bp->rx_fs_list.count = 0; in macb_init()
4631 spin_lock_init(&bp->rx_fs_lock); in macb_init()
4633 bp->max_tuples = 0; in macb_init()
4636 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
4638 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
4639 val = bp->usrio->rgmii; in macb_init()
4640 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
4641 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4642 val = bp->usrio->rmii; in macb_init()
4643 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4644 val = bp->usrio->mii; in macb_init()
4646 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
4647 val |= bp->usrio->refclk; in macb_init()
4655 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4679 struct macb_queue *q = &lp->queues[0]; in at91ether_alloc_coherent()
4681 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4684 &q->rx_ring_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4685 if (!q->rx_ring) in at91ether_alloc_coherent()
4686 return -ENOMEM; in at91ether_alloc_coherent()
4688 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4691 &q->rx_buffers_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4692 if (!q->rx_buffers) { in at91ether_alloc_coherent()
4693 dma_free_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4696 q->rx_ring, q->rx_ring_dma); in at91ether_alloc_coherent()
4697 q->rx_ring = NULL; in at91ether_alloc_coherent()
4698 return -ENOMEM; in at91ether_alloc_coherent()
4706 struct macb_queue *q = &lp->queues[0]; in at91ether_free_coherent()
4708 if (q->rx_ring) { in at91ether_free_coherent()
4709 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4712 q->rx_ring, q->rx_ring_dma); in at91ether_free_coherent()
4713 q->rx_ring = NULL; in at91ether_free_coherent()
4716 if (q->rx_buffers) { in at91ether_free_coherent()
4717 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4720 q->rx_buffers, q->rx_buffers_dma); in at91ether_free_coherent()
4721 q->rx_buffers = NULL; in at91ether_free_coherent()
4728 struct macb_queue *q = &lp->queues[0]; in at91ether_start()
4738 addr = q->rx_buffers_dma; in at91ether_start()
4742 desc->ctrl = 0; in at91ether_start()
4747 desc->addr |= MACB_BIT(RX_WRAP); in at91ether_start()
4749 /* Reset buffer index */ in at91ether_start()
4750 q->rx_tail = 0; in at91ether_start()
4753 macb_writel(lp, RBQP, q->rx_ring_dma); in at91ether_start()
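/* The RM9200 EMAC path runs a single queue over one flat coherent area:
 * descriptor i points AT91ETHER_MAX_RBUFF_SZ * i bytes into q->rx_buffers,
 * the last descriptor carries MACB_BIT(RX_WRAP) so the DMA engine loops
 * back to descriptor 0, and RBQP above is pointed at the ring base.
 */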
4799 ret = pm_runtime_resume_and_get(&lp->pdev->dev); in at91ether_open()
4824 pm_runtime_put_sync(&lp->pdev->dev); in at91ether_open()
4835 phylink_stop(lp->phylink); in at91ether_close()
4836 phylink_disconnect_phy(lp->phylink); in at91ether_close()
4840 return pm_runtime_put(&lp->pdev->dev); in at91ether_close()
4855 lp->rm9200_txq[desc].skb = skb; in at91ether_start_xmit()
4856 lp->rm9200_txq[desc].size = skb->len; in at91ether_start_xmit()
4857 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, in at91ether_start_xmit()
4858 skb->len, DMA_TO_DEVICE); in at91ether_start_xmit()
4859 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { in at91ether_start_xmit()
4861 dev->stats.tx_dropped++; in at91ether_start_xmit()
4867 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); in at91ether_start_xmit()
4869 macb_writel(lp, TCR, skb->len); in at91ether_start_xmit()
4885 struct macb_queue *q = &lp->queues[0]; in at91ether_rx()
4891 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4892 while (desc->addr & MACB_BIT(RX_USED)) { in at91ether_rx()
4893 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; in at91ether_rx()
4894 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); in at91ether_rx()
4900 skb->protocol = eth_type_trans(skb, dev); in at91ether_rx()
4901 dev->stats.rx_packets++; in at91ether_rx()
4902 dev->stats.rx_bytes += pktlen; in at91ether_rx()
4905 dev->stats.rx_dropped++; in at91ether_rx()
4908 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) in at91ether_rx()
4909 dev->stats.multicast++; in at91ether_rx()
4911 /* reset ownership bit */ in at91ether_rx()
4912 desc->addr &= ~MACB_BIT(RX_USED); in at91ether_rx()
4915 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) in at91ether_rx()
4916 q->rx_tail = 0; in at91ether_rx()
4918 q->rx_tail++; in at91ether_rx()
4920 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4945 dev->stats.tx_errors++; in at91ether_interrupt()
4948 if (lp->rm9200_txq[desc].skb) { in at91ether_interrupt()
4949 dev_consume_skb_irq(lp->rm9200_txq[desc].skb); in at91ether_interrupt()
4950 lp->rm9200_txq[desc].skb = NULL; in at91ether_interrupt()
4951 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, in at91ether_interrupt()
4952 lp->rm9200_txq[desc].size, DMA_TO_DEVICE); in at91ether_interrupt()
4953 dev->stats.tx_packets++; in at91ether_interrupt()
4954 dev->stats.tx_bytes += lp->rm9200_txq[desc].size; in at91ether_interrupt()
4959 /* Work-around for EMAC Errata section 41.3.1 */ in at91ether_interrupt()
4979 at91ether_interrupt(dev->irq, dev); in at91ether_poll_controller()
5011 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); in at91ether_clk_init()
5017 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in at91ether_clk_init()
5030 bp->queues[0].bp = bp; in at91ether_init()
5032 dev->netdev_ops = &at91ether_netdev_ops; in at91ether_init()
5033 dev->ethtool_ops = &macb_ethtool_ops; in at91ether_init()
5035 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, in at91ether_init()
5036 0, dev->name, dev); in at91ether_init()
5050 return mgmt->rate; in fu540_macb_tx_recalc_rate()
5056 if (WARN_ON(req->rate < 2500000)) in fu540_macb_tx_determine_rate()
5057 req->rate = 2500000; in fu540_macb_tx_determine_rate()
5058 else if (req->rate == 2500000) in fu540_macb_tx_determine_rate()
5059 req->rate = 2500000; in fu540_macb_tx_determine_rate()
5060 else if (WARN_ON(req->rate < 13750000)) in fu540_macb_tx_determine_rate()
5061 req->rate = 2500000; in fu540_macb_tx_determine_rate()
5062 else if (WARN_ON(req->rate < 25000000)) in fu540_macb_tx_determine_rate()
5063 req->rate = 25000000; in fu540_macb_tx_determine_rate()
5064 else if (req->rate == 25000000) in fu540_macb_tx_determine_rate()
5065 req->rate = 25000000; in fu540_macb_tx_determine_rate()
5066 else if (WARN_ON(req->rate < 75000000)) in fu540_macb_tx_determine_rate()
5067 req->rate = 25000000; in fu540_macb_tx_determine_rate()
5068 else if (WARN_ON(req->rate < 125000000)) in fu540_macb_tx_determine_rate()
5069 req->rate = 125000000; in fu540_macb_tx_determine_rate()
5070 else if (req->rate == 125000000) in fu540_macb_tx_determine_rate()
5071 req->rate = 125000000; in fu540_macb_tx_determine_rate()
5072 else if (WARN_ON(req->rate > 125000000)) in fu540_macb_tx_determine_rate()
5073 req->rate = 125000000; in fu540_macb_tx_determine_rate()
5075 req->rate = 125000000; in fu540_macb_tx_determine_rate()
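The WARN_ON ladder above quantizes any requested rate onto the three frequencies the GEMGXL management clock can actually produce, switching at the midpoints (13.75 MHz between 2.5 and 25 MHz, 75 MHz between 25 and 125 MHz). The same mapping written directly, as a sketch:

/* Snap a requested rate to the nearest supported GEMGXL frequency using
 * the midpoint thresholds; mirrors the ladder above without the WARN_ONs. */
static unsigned long fu540_nearest_rate(unsigned long rate)
{
	if (rate < 13750000)
		return 2500000;
	if (rate < 75000000)
		return 25000000;
	return 125000000;
}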
5092 iowrite32(1, mgmt->reg); in fu540_macb_tx_set_rate()
5094 iowrite32(0, mgmt->reg); in fu540_macb_tx_set_rate()
5095 mgmt->rate = rate; in fu540_macb_tx_set_rate()
5117 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); in fu540_c000_clk_init()
5119 err = -ENOMEM; in fu540_c000_clk_init()
5123 init.name = "sifive-gemgxl-mgmt"; in fu540_c000_clk_init()
5128 mgmt->rate = 0; in fu540_c000_clk_init()
5129 mgmt->hw.init = &init; in fu540_c000_clk_init()
5131 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); in fu540_c000_clk_init()
5139 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); in fu540_c000_clk_init()
5143 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); in fu540_c000_clk_init()
5156 mgmt->reg = devm_platform_ioremap_resource(pdev, 1); in fu540_c000_init()
5157 if (IS_ERR(mgmt->reg)) in fu540_c000_init()
5158 return PTR_ERR(mgmt->reg); in fu540_c000_init()
5169 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in init_reset_optional()
5171 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); in init_reset_optional()
5173 if (IS_ERR(bp->sgmii_phy)) in init_reset_optional()
5174 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy), in init_reset_optional()
5177 ret = phy_init(bp->sgmii_phy); in init_reset_optional()
5179 return dev_err_probe(&pdev->dev, ret, in init_reset_optional()
5186 ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains", in init_reset_optional()
5189 dev_err(&pdev->dev, "Failed to read power management information\n"); in init_reset_optional()
5203 /* Fully reset controller at hardware level if mapped in device tree */ in init_reset_optional()
5204 ret = device_reset_optional(&pdev->dev); in init_reset_optional()
5206 phy_exit(bp->sgmii_phy); in init_reset_optional()
5207 return dev_err_probe(&pdev->dev, ret, "failed to reset controller"); in init_reset_optional()
5214 phy_exit(bp->sgmii_phy); in init_reset_optional()
5385 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
5387 { .compatible = "cdns,np4-macb", .data = &np4_config },
5388 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
5390 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
5391 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
5392 { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
5393 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
5394 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
5395 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
5396 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
5398 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
5399 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
5400 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
5401 { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
5402 { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
5403 { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
5404 { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
5405 { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
5406 { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
5407 { .compatible = "xlnx,versal-gem", .data = &versal_config},
5429 struct clk **) = macb_config->clk_init; in macb_probe()
5430 int (*init)(struct platform_device *) = macb_config->init; in macb_probe()
5431 struct device_node *np = pdev->dev.of_node; in macb_probe()
5452 if (match && match->data) { in macb_probe()
5453 macb_config = match->data; in macb_probe()
5454 clk_init = macb_config->clk_init; in macb_probe()
5455 init = macb_config->init; in macb_probe()
5463 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); in macb_probe()
5464 pm_runtime_use_autosuspend(&pdev->dev); in macb_probe()
5465 pm_runtime_get_noresume(&pdev->dev); in macb_probe()
5466 pm_runtime_set_active(&pdev->dev); in macb_probe()
5467 pm_runtime_enable(&pdev->dev); in macb_probe()
5473 err = -ENOMEM; in macb_probe()
5477 dev->base_addr = regs->start; in macb_probe()
5479 SET_NETDEV_DEV(dev, &pdev->dev); in macb_probe()
5482 bp->pdev = pdev; in macb_probe()
5483 bp->dev = dev; in macb_probe()
5484 bp->regs = mem; in macb_probe()
5485 bp->native_io = native_io; in macb_probe()
5487 bp->macb_reg_readl = hw_readl_native; in macb_probe()
5488 bp->macb_reg_writel = hw_writel_native; in macb_probe()
5490 bp->macb_reg_readl = hw_readl; in macb_probe()
5491 bp->macb_reg_writel = hw_writel; in macb_probe()
5493 bp->num_queues = num_queues; in macb_probe()
5494 bp->queue_mask = queue_mask; in macb_probe()
5496 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
5497 bp->pclk = pclk; in macb_probe()
5498 bp->hclk = hclk; in macb_probe()
5499 bp->tx_clk = tx_clk; in macb_probe()
5500 bp->rx_clk = rx_clk; in macb_probe()
5501 bp->tsu_clk = tsu_clk; in macb_probe()
5503 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
5505 if (!hw_is_gem(bp->regs, bp->native_io)) in macb_probe()
5506 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_probe()
5507 else if (macb_config->max_tx_length) in macb_probe()
5508 bp->max_tx_length = macb_config->max_tx_length; in macb_probe()
5510 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_probe()
5512 bp->wol = 0; in macb_probe()
5513 device_set_wakeup_capable(&pdev->dev, 1); in macb_probe()
5515 bp->usrio = macb_config->usrio; in macb_probe()
5517 /* By default we set to partial store and forward mode for zynqmp. in macb_probe()
5521 err = of_property_read_u32(bp->pdev->dev.of_node, in macb_probe()
5522 "cdns,rx-watermark", in macb_probe()
5523 &bp->rx_watermark); in macb_probe()
5529 wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1; in macb_probe()
5530 if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) { in macb_probe()
5531 dev_info(&bp->pdev->dev, "Invalid watermark value\n"); in macb_probe()
5532 bp->rx_watermark = 0; in macb_probe()
5536 spin_lock_init(&bp->lock); in macb_probe()
5537 spin_lock_init(&bp->stats_lock); in macb_probe()
5544 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); in macb_probe()
5546 dev_err(&pdev->dev, "failed to set DMA mask\n"); in macb_probe()
5549 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
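	/* When the GEM advertises 64-bit descriptor addressing (the guarding
	 * capability check is elided from this listing), the DMA mask is
	 * widened to 44 bits, the GEM's extended address range, and the
	 * larger descriptor layout is selected via HW_DMA_CAP_64B.
	 */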
5554 dev->irq = platform_get_irq(pdev, 0); in macb_probe()
5555 if (dev->irq < 0) { in macb_probe()
5556 err = dev->irq; in macb_probe()
5560 /* MTU range: 68 - 1518 or 10240 */ in macb_probe()
5561 dev->min_mtu = GEM_MTU_MIN_SIZE; in macb_probe()
5562 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_probe()
5563 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
5565 dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
5567 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
5570 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
5575 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
5579 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
5580 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
5581 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
5583 err = of_get_ethdev_address(np, bp->dev); in macb_probe()
5584 if (err == -EPROBE_DEFER) in macb_probe()
5592 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
5594 bp->phy_interface = interface; in macb_probe()
5609 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); in macb_probe()
5613 INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task); in macb_probe()
5617 dev->base_addr, dev->irq, dev->dev_addr); in macb_probe()
5619 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
5620 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
5625 mdiobus_unregister(bp->mii_bus); in macb_probe()
5626 mdiobus_free(bp->mii_bus); in macb_probe()
5629 phy_exit(bp->sgmii_phy); in macb_probe()
5636 pm_runtime_disable(&pdev->dev); in macb_probe()
5637 pm_runtime_set_suspended(&pdev->dev); in macb_probe()
5638 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_probe()
5653 phy_exit(bp->sgmii_phy); in macb_remove()
5654 mdiobus_unregister(bp->mii_bus); in macb_remove()
5655 mdiobus_free(bp->mii_bus); in macb_remove()
5657 device_set_wakeup_enable(&bp->pdev->dev, 0); in macb_remove()
5658 cancel_work_sync(&bp->hresp_err_bh_work); in macb_remove()
5659 pm_runtime_disable(&pdev->dev); in macb_remove()
5660 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_remove()
5661 pm_runtime_set_suspended(&pdev->dev); in macb_remove()
5662 phylink_destroy(bp->phylink); in macb_remove()
5679 if (!device_may_wakeup(&bp->dev->dev)) in macb_suspend()
5680 phy_exit(bp->sgmii_phy); in macb_suspend()
5685 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
5687 idev = __in_dev_get_rcu(bp->dev); in macb_suspend()
5689 ifa = rcu_dereference(idev->ifa_list); in macb_suspend()
5690 if ((bp->wolopts & WAKE_ARP) && !ifa) { in macb_suspend()
5692 return -EOPNOTSUPP; in macb_suspend()
5694 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5702 if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) in macb_suspend()
5704 upper_32_bits(bp->rx_ring_tieoff_dma)); in macb_suspend()
5706 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5709 if (bp->caps & MACB_CAPS_QUEUE_DISABLE) { in macb_suspend()
5714 lower_32_bits(bp->rx_ring_tieoff_dma)); in macb_suspend()
5717 queue_writel(queue, IDR, -1); in macb_suspend()
5719 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
5720 queue_writel(queue, ISR, -1); in macb_suspend()
5725 macb_writel(bp, TSR, -1); in macb_suspend()
5726 macb_writel(bp, RSR, -1); in macb_suspend()
5728 tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0; in macb_suspend()
5729 if (bp->wolopts & WAKE_ARP) { in macb_suspend()
5732 tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local)); in macb_suspend()
5738 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
5740 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
5741 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5745 bp->queues[0].irq, err); in macb_suspend()
5746 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5749 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
5752 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
5753 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5757 bp->queues[0].irq, err); in macb_suspend()
5758 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5761 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
5764 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5766 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
5770 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5772 napi_disable(&queue->napi_rx); in macb_suspend()
5773 napi_disable(&queue->napi_tx); in macb_suspend()
5776 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
5778 phylink_stop(bp->phylink); in macb_suspend()
5780 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5782 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5785 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
5786 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
5788 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_suspend()
5789 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
5791 if (bp->ptp_info) in macb_suspend()
5792 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
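/* Suspend summary: with WoL armed the MAC stays powered; RX queues are
 * either disabled outright (MACB_CAPS_QUEUE_DISABLE) or pointed at a
 * tied-off dummy descriptor, all interrupts are masked and acked, a
 * dedicated WoL handler replaces the normal IRQ, and the MAG/ARP wake
 * sources are programmed. Without WoL the link is stopped instead, and
 * USRIO plus the SCRT2 screener state are saved for macb_resume().
 */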
5808 if (!device_may_wakeup(&bp->dev->dev)) in macb_resume()
5809 phy_init(bp->sgmii_phy); in macb_resume()
5817 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
5818 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
5821 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
5824 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
5828 queue_readl(bp->queues, ISR); in macb_resume()
5829 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
5830 queue_writel(bp->queues, ISR, -1); in macb_resume()
5832 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
5833 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
5834 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
5838 bp->queues[0].irq, err); in macb_resume()
5839 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5842 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5844 disable_irq_wake(bp->queues[0].irq); in macb_resume()
5850 phylink_stop(bp->phylink); in macb_resume()
5854 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5856 napi_enable(&queue->napi_rx); in macb_resume()
5857 napi_enable(&queue->napi_tx); in macb_resume()
5860 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_resume()
5861 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5863 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5864 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5872 phylink_start(bp->phylink); in macb_resume()
5876 if (bp->ptp_info) in macb_resume()
5877 bp->ptp_info->ptp_init(netdev); in macb_resume()
5888 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5889 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) in macb_runtime_suspend()
5890 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5901 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5902 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5903 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5904 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5905 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
5906 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) { in macb_runtime_resume()
5907 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
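/* Runtime PM mirror of the above: wake-capable devices keep all clocks but
 * the (optional) TSU clock running across runtime suspend so the WoL logic
 * stays powered, while non-wake devices have pclk, hclk, tx_clk, rx_clk and
 * tsu_clk gated and re-enabled here; the device_may_wakeup() test itself is
 * elided from this listing.
 */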