Lines matching "mac"
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
35 static int mtk_msg_level = -1;
37 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
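Between the two fragments above, an elided line presumably binds the variable to the module parameter; a hedged sketch of the usual form:

/* Assumed form of the elided module_param binding; not verbatim. */
module_param_named(msg_level, mtk_msg_level, int, 0);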
291 __raw_writel(val, eth->base + reg); in mtk_w32()
296 return __raw_readl(eth->base + reg); in mtk_r32()
322 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
323 return -ETIMEDOUT; in mtk_mdio_busy_wait()
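The two fragments above are the tail of a bounded busy-wait; a minimal sketch of that shape, assuming the driver's MTK_PHY_IAC/PHY_IAC_ACCESS names and an illustrative one-second budget:

/* Sketch only: poll the MDIO busy bit until it clears or time out. */
static int example_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long timeout = jiffies + HZ;	/* illustrative budget */

	while (mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS) {
		if (time_after(jiffies, timeout)) {
			dev_err(eth->dev, "mdio: MDIO timeout\n");
			return -ETIMEDOUT;
		}
		cond_resched();
	}
	return 0;
}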
446 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22()
454 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45()
461 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22()
469 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45()
482 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
495 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
497 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
501 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
520 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_select_pcs() local
522 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs()
527 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
528 0 : mac->id; in mtk_mac_select_pcs()
530 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
539 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_prepare() local
541 struct mtk_eth *eth = mac->hw; in mtk_mac_prepare()
544 mac->id != MTK_GMAC1_ID) { in mtk_mac_prepare()
545 mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, in mtk_mac_prepare()
546 XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id)); in mtk_mac_prepare()
548 mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) | in mtk_mac_prepare()
549 MTK_XGMAC_FORCE_LINK(mac->id), in mtk_mac_prepare()
550 MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id)); in mtk_mac_prepare()
559 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_config() local
561 struct mtk_eth *eth = mac->hw; in mtk_mac_config()
565 /* MT76x8 has no hardware settings for the MAC */ in mtk_mac_config()
566 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
567 mac->interface != state->interface) { in mtk_mac_config()
569 switch (state->interface) { in mtk_mac_config()
576 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
577 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
585 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
590 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
591 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
597 if (mac->id == MTK_GMAC2_ID && in mtk_mac_config()
598 MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) { in mtk_mac_config()
599 err = mtk_gmac_2p5gphy_path_setup(eth, mac->id); in mtk_mac_config()
609 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && in mtk_mac_config()
610 !phy_interface_mode_is_8023z(state->interface) && in mtk_mac_config()
611 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { in mtk_mac_config()
612 if (MTK_HAS_CAPS(mac->hw->soc->caps, in mtk_mac_config()
614 if (mt7621_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
615 state->interface)) in mtk_mac_config()
618 mtk_gmac0_rgmii_adjust(mac->hw, in mtk_mac_config()
619 state->interface); in mtk_mac_config()
623 mtk_w32(mac->hw, in mtk_mac_config()
628 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, in mtk_mac_config()
630 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); in mtk_mac_config()
634 switch (state->interface) { in mtk_mac_config()
645 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
646 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); in mtk_mac_config()
647 val |= SYSCFG0_GE_MODE(ge_mode, mac->id); in mtk_mac_config()
648 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
650 mac->interface = state->interface; in mtk_mac_config()
654 if (state->interface == PHY_INTERFACE_MODE_SGMII || in mtk_mac_config()
655 phy_interface_mode_is_8023z(state->interface)) { in mtk_mac_config()
659 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
661 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
666 mac->syscfg0 = val; in mtk_mac_config()
668 dev_err(eth->dev, in mtk_mac_config()
669 "In-band mode not supported in non SGMII mode!\n"); in mtk_mac_config()
674 if (mtk_interface_mode_is_xgmii(eth, state->interface)) { in mtk_mac_config()
675 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id)); in mtk_mac_config()
676 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id)); in mtk_mac_config()
678 if (mac->id == MTK_GMAC1_ID) in mtk_mac_config()
685 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
686 mac->id, phy_modes(state->interface)); in mtk_mac_config()
690 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
691 mac->id, phy_modes(state->interface), err); in mtk_mac_config()
697 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_finish() local
699 struct mtk_eth *eth = mac->hw; in mtk_mac_finish()
705 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
706 SYSCFG0_SGMII_MASK, mac->syscfg0); in mtk_mac_finish()
709 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
716 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_mac_finish()
724 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_link_down() local
727 if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) { in mtk_mac_link_down()
729 mtk_m32(mac->hw, in mtk_mac_link_down()
731 MTK_MAC_MCR(mac->id)); in mtk_mac_link_down()
732 } else if (mac->id != MTK_GMAC1_ID) { in mtk_mac_link_down()
733 /* XGMAC except for built-in switch */ in mtk_mac_link_down()
734 mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE, in mtk_mac_link_down()
735 MTK_XMAC_MCR(mac->id)); in mtk_mac_link_down()
736 mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0, in mtk_mac_link_down()
737 MTK_XGMAC_STS(mac->id)); in mtk_mac_link_down()
744 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
747 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_set_queue_speed()
807 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
810 static void mtk_gdm_mac_link_up(struct mtk_mac *mac, in mtk_gdm_mac_link_up() argument
818 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_gdm_mac_link_up()
824 mac->speed = speed; in mtk_gdm_mac_link_up()
839 /* Configure pause modes - phylink will avoid these for half duplex */ in mtk_gdm_mac_link_up()
846 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); in mtk_gdm_mac_link_up()
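The link-up path above is a read-modify-write of the per-MAC MCR register; condensed to its basic shape (the exact set of force bits cleared on the elided lines is an assumption):

u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

/* drop stale speed/duplex force bits, then apply the new link state */
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | MAC_MCR_FORCE_DPX);
if (speed == SPEED_1000)
	mcr |= MAC_MCR_SPEED_1000;
else if (speed == SPEED_100)
	mcr |= MAC_MCR_SPEED_100;
if (duplex == DUPLEX_FULL)
	mcr |= MAC_MCR_FORCE_DPX;
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));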
849 static void mtk_xgdm_mac_link_up(struct mtk_mac *mac, in mtk_xgdm_mac_link_up() argument
857 if (mac->id == MTK_GMAC1_ID) in mtk_xgdm_mac_link_up()
860 /* Eliminate the interference (before link-up) caused by PHY noise */ in mtk_xgdm_mac_link_up()
861 mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id)); in mtk_xgdm_mac_link_up()
863 mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR, in mtk_xgdm_mac_link_up()
864 MTK_XMAC_CNT_CTRL(mac->id)); in mtk_xgdm_mac_link_up()
866 mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), in mtk_xgdm_mac_link_up()
867 MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id)); in mtk_xgdm_mac_link_up()
869 mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id)); in mtk_xgdm_mac_link_up()
872 /* Configure pause modes - in mtk_xgdm_mac_link_up()
880 mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id)); in mtk_xgdm_mac_link_up()
888 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_link_up() local
891 if (mtk_interface_mode_is_xgmii(mac->hw, interface)) in mtk_mac_link_up()
892 mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex, in mtk_mac_link_up()
895 mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex, in mtk_mac_link_up()
901 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_disable_tx_lpi() local
903 struct mtk_eth *eth = mac->hw; in mtk_mac_disable_tx_lpi()
905 mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id)); in mtk_mac_disable_tx_lpi()
911 struct mtk_mac *mac = container_of(config, struct mtk_mac, in mtk_mac_enable_tx_lpi() local
913 struct mtk_eth *eth = mac->hw; in mtk_mac_enable_tx_lpi()
916 if (mtk_interface_mode_is_xgmii(eth, mac->interface)) in mtk_mac_enable_tx_lpi()
917 return -EOPNOTSUPP; in mtk_mac_enable_tx_lpi()
936 /* PHY Wake-up time, this field does not have a reset value, so use the in mtk_mac_enable_tx_lpi()
942 mtk_w32(eth, val, MTK_MAC_EEECR(mac->id)); in mtk_mac_enable_tx_lpi()
943 mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id)); in mtk_mac_enable_tx_lpi()
964 val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider); in mtk_mdio_config()
982 mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
984 dev_err(eth->dev, "no %s child node found\n", "mdio-bus"); in mtk_mdio_init()
985 return -ENODEV; in mtk_mdio_init()
988 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
989 if (!eth->mii_bus) { in mtk_mdio_init()
990 ret = -ENOMEM; in mtk_mdio_init()
994 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
995 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
996 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
997 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
998 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
999 eth->mii_bus->priv = eth; in mtk_mdio_init()
1000 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
1002 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
1004 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) { in mtk_mdio_init()
1006 dev_err(eth->dev, "MDIO clock frequency out of range\n"); in mtk_mdio_init()
1007 ret = -EINVAL; in mtk_mdio_init()
1012 eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); in mtk_mdio_init()
1014 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider); in mtk_mdio_init()
1015 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
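A worked example of the divider math above, assuming MDC_MAX_FREQ is 25 MHz (the constant's value is an assumption):

/* "clock-frequency" = 2500000 in the DT:
 *   DIV_ROUND_UP(25000000, 2500000) == 10, under the clamp of 63,
 *   so MDC runs at 25000000 / 10 == 2500000 Hz exactly.
 * An absurdly low request instead saturates at divider 63.
 */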
1024 if (!eth->mii_bus) in mtk_mdio_cleanup()
1027 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
1035 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
1036 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
1037 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
1038 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
1046 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
1047 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
1048 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
1049 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
1057 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
1058 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
1059 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
1060 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
1068 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
1069 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
1070 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
1071 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
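The four helpers above differ only in lock, register and direction; the shared pattern, condensed into one hypothetical helper:

static void example_irq_mask_update(struct mtk_eth *eth, spinlock_t *lock,
				    u32 reg, u32 mask, bool enable)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, enable ? (val | mask) : (val & ~mask), reg);
	spin_unlock_irqrestore(lock, flags);
}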
1077 struct mtk_mac *mac = netdev_priv(dev); in mtk_set_mac_address() local
1078 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address()
1079 const char *macaddr = dev->dev_addr; in mtk_set_mac_address()
1084 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_mac_address()
1085 return -EBUSY; in mtk_set_mac_address()
1087 spin_lock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
1088 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
1089 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
1091 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
1095 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], in mtk_set_mac_address()
1096 MTK_GDMA_MAC_ADRH(mac->id)); in mtk_set_mac_address()
1097 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | in mtk_set_mac_address()
1099 MTK_GDMA_MAC_ADRL(mac->id)); in mtk_set_mac_address()
1101 spin_unlock_bh(&mac->hw->page_lock); in mtk_set_mac_address()
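A worked example of the byte packing above (the tail of the ADRL expression is elided, so its final bytes are an inference):

/* For MAC address 00:11:22:33:44:55:
 *   ADRH = (0x00 << 8) | 0x11                == 0x0011
 *   ADRL = (0x22 << 24) | (0x33 << 16) | ... == 0x22334455,
 * presumably completed as (macaddr[4] << 8) | macaddr[5] on the
 * elided continuation lines.
 */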
1106 void mtk_stats_update_mac(struct mtk_mac *mac) in mtk_stats_update_mac() argument
1108 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_stats_update_mac()
1109 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac()
1111 u64_stats_update_begin(&hw_stats->syncp); in mtk_stats_update_mac()
1113 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
1114 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT); in mtk_stats_update_mac()
1115 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT); in mtk_stats_update_mac()
1116 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT); in mtk_stats_update_mac()
1117 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT); in mtk_stats_update_mac()
1118 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
1119 mtk_r32(mac->hw, MT7628_SDM_CS_ERR); in mtk_stats_update_mac()
1121 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
1122 unsigned int offs = hw_stats->reg_offset; in mtk_stats_update_mac()
1125 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); in mtk_stats_update_mac()
1126 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs); in mtk_stats_update_mac()
1128 hw_stats->rx_bytes += (stats << 32); in mtk_stats_update_mac()
1129 hw_stats->rx_packets += in mtk_stats_update_mac()
1130 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); in mtk_stats_update_mac()
1131 hw_stats->rx_overflow += in mtk_stats_update_mac()
1132 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); in mtk_stats_update_mac()
1133 hw_stats->rx_fcs_errors += in mtk_stats_update_mac()
1134 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); in mtk_stats_update_mac()
1135 hw_stats->rx_short_errors += in mtk_stats_update_mac()
1136 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); in mtk_stats_update_mac()
1137 hw_stats->rx_long_errors += in mtk_stats_update_mac()
1138 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); in mtk_stats_update_mac()
1139 hw_stats->rx_checksum_errors += in mtk_stats_update_mac()
1140 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); in mtk_stats_update_mac()
1141 hw_stats->rx_flow_control_packets += in mtk_stats_update_mac()
1142 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); in mtk_stats_update_mac()
1145 hw_stats->tx_skip += in mtk_stats_update_mac()
1146 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs); in mtk_stats_update_mac()
1147 hw_stats->tx_collisions += in mtk_stats_update_mac()
1148 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs); in mtk_stats_update_mac()
1149 hw_stats->tx_bytes += in mtk_stats_update_mac()
1150 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs); in mtk_stats_update_mac()
1151 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs); in mtk_stats_update_mac()
1153 hw_stats->tx_bytes += (stats << 32); in mtk_stats_update_mac()
1154 hw_stats->tx_packets += in mtk_stats_update_mac()
1155 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs); in mtk_stats_update_mac()
1157 hw_stats->tx_skip += in mtk_stats_update_mac()
1158 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); in mtk_stats_update_mac()
1159 hw_stats->tx_collisions += in mtk_stats_update_mac()
1160 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); in mtk_stats_update_mac()
1161 hw_stats->tx_bytes += in mtk_stats_update_mac()
1162 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); in mtk_stats_update_mac()
1163 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); in mtk_stats_update_mac()
1165 hw_stats->tx_bytes += (stats << 32); in mtk_stats_update_mac()
1166 hw_stats->tx_packets += in mtk_stats_update_mac()
1167 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); in mtk_stats_update_mac()
1171 u64_stats_update_end(&hw_stats->syncp); in mtk_stats_update_mac()
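Where the MIB counters are wider than 32 bits (the guard sits on elided lines), the fragments above fold a low and a high word together; schematically:

u64 lo = mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);		/* low word */
u64 hi = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);	/* high word */

hw_stats->rx_bytes += lo;
hw_stats->rx_bytes += hi << 32;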
1179 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1181 if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1182 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1183 spin_unlock(ð->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1191 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_stats64() local
1192 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_get_stats64()
1196 if (spin_trylock_bh(&hw_stats->stats_lock)) { in mtk_get_stats64()
1197 mtk_stats_update_mac(mac); in mtk_get_stats64()
1198 spin_unlock_bh(&hw_stats->stats_lock); in mtk_get_stats64()
1203 start = u64_stats_fetch_begin(&hw_stats->syncp); in mtk_get_stats64()
1204 storage->rx_packets = hw_stats->rx_packets; in mtk_get_stats64()
1205 storage->tx_packets = hw_stats->tx_packets; in mtk_get_stats64()
1206 storage->rx_bytes = hw_stats->rx_bytes; in mtk_get_stats64()
1207 storage->tx_bytes = hw_stats->tx_bytes; in mtk_get_stats64()
1208 storage->collisions = hw_stats->tx_collisions; in mtk_get_stats64()
1209 storage->rx_length_errors = hw_stats->rx_short_errors + in mtk_get_stats64()
1210 hw_stats->rx_long_errors; in mtk_get_stats64()
1211 storage->rx_over_errors = hw_stats->rx_overflow; in mtk_get_stats64()
1212 storage->rx_crc_errors = hw_stats->rx_fcs_errors; in mtk_get_stats64()
1213 storage->rx_errors = hw_stats->rx_checksum_errors; in mtk_get_stats64()
1214 storage->tx_aborted_errors = hw_stats->tx_skip; in mtk_get_stats64()
1215 } while (u64_stats_fetch_retry(&hw_stats->syncp, start)); in mtk_get_stats64()
1217 storage->tx_errors = dev->stats.tx_errors; in mtk_get_stats64()
1218 storage->rx_dropped = dev->stats.rx_dropped; in mtk_get_stats64()
1219 storage->tx_dropped = dev->stats.tx_dropped; in mtk_get_stats64()
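Why the retry loop above exists:

/* u64_stats_fetch_begin/retry let this reader take a consistent
 * 64-bit snapshot without blocking the writer; on 32-bit SMP the
 * counters cannot be read atomically, so the reader loops until a
 * whole pass completes with no concurrent u64_stats_update_begin/end
 * (the writer side visible in mtk_stats_update_mac() above).
 */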
1226 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_max_frag_size()
1234 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - in mtk_max_buf_size()
1245 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); in mtk_rx_get_desc()
1246 if (!(rxd->rxd2 & RX_DMA_DONE)) in mtk_rx_get_desc()
1249 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); in mtk_rx_get_desc()
1250 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); in mtk_rx_get_desc()
1251 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); in mtk_rx_get_desc()
1253 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); in mtk_rx_get_desc()
1254 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); in mtk_rx_get_desc()
1276 if (use_sram && eth->sram_pool) { in mtk_dma_ring_alloc()
1277 dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size); in mtk_dma_ring_alloc()
1280 *dma_handle = gen_pool_virt_to_phys(eth->sram_pool, in mtk_dma_ring_alloc()
1283 dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle, in mtk_dma_ring_alloc()
1293 if (in_sram && eth->sram_pool) in mtk_dma_ring_free()
1294 gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size); in mtk_dma_ring_free()
1296 dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle); in mtk_dma_ring_free()
1302 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1304 int cnt = soc->tx.fq_dma_size; in mtk_init_fq_dma()
1308 eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size, in mtk_init_fq_dma()
1309 ð->phy_scratch_ring, true); in mtk_init_fq_dma()
1311 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1312 return -ENOMEM; in mtk_init_fq_dma()
1314 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); in mtk_init_fq_dma()
1316 for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) { in mtk_init_fq_dma()
1317 len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH); in mtk_init_fq_dma()
1318 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1320 if (unlikely(!eth->scratch_head[j])) in mtk_init_fq_dma()
1321 return -ENOMEM; in mtk_init_fq_dma()
1323 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1324 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1327 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1328 return -ENOMEM; in mtk_init_fq_dma()
1333 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; in mtk_init_fq_dma()
1334 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; in mtk_init_fq_dma()
1336 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1337 (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size; in mtk_init_fq_dma()
1339 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
1340 if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) in mtk_init_fq_dma()
1341 txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
1343 txd->txd4 = 0; in mtk_init_fq_dma()
1345 txd->txd5 = 0; in mtk_init_fq_dma()
1346 txd->txd6 = 0; in mtk_init_fq_dma()
1347 txd->txd7 = 0; in mtk_init_fq_dma()
1348 txd->txd8 = 0; in mtk_init_fq_dma()
1353 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1354 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1355 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1356 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
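The geometry the loop above builds, spelled out (derived from the visible address math):

/* cnt descriptors, each owning one MTK_QDMA_PAGE_SIZE scratch page
 * and chained through txd2; descriptor k lives at
 *   eth->phy_scratch_ring + k * soc->tx.desc_size
 * so fq_head is k == 0 and fq_tail (phy_ring_tail) is k == cnt - 1.
 */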
1363 return ring->dma + (desc - ring->phys); in mtk_qdma_phys_to_virt()
1369 int idx = (txd - ring->dma) / txd_size; in mtk_desc_to_tx_buf()
1371 return &ring->buf[idx]; in mtk_desc_to_tx_buf()
1377 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; in qdma_to_pdma()
1382 return (dma - ring->dma) / txd_size; in txd_to_idx()
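The three helpers above convert between the hardware and software views of the TX ring; their invariants, spelled out:

/*   virt = ring->dma + (phys - ring->phys)	phys -> virt
 *   idx  = (txd - ring->dma) / txd_size	virt -> ring index
 *   buf  = &ring->buf[idx]			index -> bookkeeping
 * i.e. descriptors and their software state sit at the same index
 * in parallel arrays.
 */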
1388 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1389 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { in mtk_tx_unmap()
1390 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1394 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { in mtk_tx_unmap()
1395 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1402 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1409 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1416 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_tx_unmap()
1417 if (tx_buf->type == MTK_TYPE_SKB) { in mtk_tx_unmap()
1418 struct sk_buff *skb = tx_buf->data; in mtk_tx_unmap()
1425 struct xdp_frame *xdpf = tx_buf->data; in mtk_tx_unmap()
1427 if (napi && tx_buf->type == MTK_TYPE_XDP_TX) in mtk_tx_unmap()
1435 tx_buf->flags = 0; in mtk_tx_unmap()
1436 tx_buf->data = NULL; in mtk_tx_unmap()
1443 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1448 txd->txd3 = mapped_addr; in setup_tx_buf()
1449 txd->txd2 |= TX_DMA_PLEN1(size); in setup_tx_buf()
1453 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in setup_tx_buf()
1454 txd->txd1 = mapped_addr; in setup_tx_buf()
1455 txd->txd2 = TX_DMA_PLEN0(size); in setup_tx_buf()
1465 struct mtk_mac *mac = netdev_priv(dev); in mtk_tx_set_dma_desc_v1() local
1466 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1()
1470 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v1()
1472 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) | in mtk_tx_set_dma_desc_v1()
1473 FIELD_PREP(TX_DMA_PQID, info->qid); in mtk_tx_set_dma_desc_v1()
1474 if (info->last) in mtk_tx_set_dma_desc_v1()
1476 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v1()
1478 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ in mtk_tx_set_dma_desc_v1()
1479 if (info->first) { in mtk_tx_set_dma_desc_v1()
1480 if (info->gso) in mtk_tx_set_dma_desc_v1()
1483 if (info->csum) in mtk_tx_set_dma_desc_v1()
1486 if (info->vlan) in mtk_tx_set_dma_desc_v1()
1487 data |= TX_DMA_INS_VLAN | info->vlan_tci; in mtk_tx_set_dma_desc_v1()
1489 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v1()
1495 struct mtk_mac *mac = netdev_priv(dev); in mtk_tx_set_dma_desc_v2() local
1497 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2()
1500 WRITE_ONCE(desc->txd1, info->addr); in mtk_tx_set_dma_desc_v2()
1502 data = TX_DMA_PLEN0(info->size); in mtk_tx_set_dma_desc_v2()
1503 if (info->last) in mtk_tx_set_dma_desc_v2()
1506 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1507 data |= TX_DMA_PREP_ADDR64(info->addr); in mtk_tx_set_dma_desc_v2()
1509 WRITE_ONCE(desc->txd3, data); in mtk_tx_set_dma_desc_v2()
1512 switch (mac->id) { in mtk_tx_set_dma_desc_v2()
1524 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); in mtk_tx_set_dma_desc_v2()
1525 WRITE_ONCE(desc->txd4, data); in mtk_tx_set_dma_desc_v2()
1528 if (info->first) { in mtk_tx_set_dma_desc_v2()
1529 if (info->gso) in mtk_tx_set_dma_desc_v2()
1532 if (info->csum) in mtk_tx_set_dma_desc_v2()
1537 WRITE_ONCE(desc->txd5, data); in mtk_tx_set_dma_desc_v2()
1540 if (info->first && info->vlan) in mtk_tx_set_dma_desc_v2()
1541 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; in mtk_tx_set_dma_desc_v2()
1542 WRITE_ONCE(desc->txd6, data); in mtk_tx_set_dma_desc_v2()
1544 WRITE_ONCE(desc->txd7, 0); in mtk_tx_set_dma_desc_v2()
1545 WRITE_ONCE(desc->txd8, 0); in mtk_tx_set_dma_desc_v2()
1551 struct mtk_mac *mac = netdev_priv(dev); in mtk_tx_set_dma_desc() local
1552 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc()
1566 .csum = skb->ip_summed == CHECKSUM_PARTIAL, in mtk_tx_map()
1574 struct mtk_mac *mac = netdev_priv(dev); in mtk_tx_map() local
1575 struct mtk_eth *eth = mac->hw; in mtk_tx_map()
1576 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1585 itxd = ring->next_free; in mtk_tx_map()
1587 if (itxd == ring->last_free) in mtk_tx_map()
1588 return -ENOMEM; in mtk_tx_map()
1590 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); in mtk_tx_map()
1593 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1595 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1596 return -ENOMEM; in mtk_tx_map()
1600 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_tx_map()
1601 itx_buf->mac_id = mac->id; in mtk_tx_map()
1609 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_tx_map()
1610 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mtk_tx_map()
1617 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || in mtk_tx_map()
1619 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1621 if (txd == ring->last_free) in mtk_tx_map()
1631 soc->tx.dma_max_len); in mtk_tx_map()
1633 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && in mtk_tx_map()
1634 !(frag_size - txd_info.size); in mtk_tx_map()
1635 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1638 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1644 soc->tx.desc_size); in mtk_tx_map()
1647 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_tx_map()
1648 tx_buf->flags |= MTK_TX_FLAGS_PAGE0; in mtk_tx_map()
1649 tx_buf->mac_id = mac->id; in mtk_tx_map()
1654 frag_size -= txd_info.size; in mtk_tx_map()
1660 itx_buf->type = MTK_TYPE_SKB; in mtk_tx_map()
1661 itx_buf->data = skb; in mtk_tx_map()
1663 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1665 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_tx_map()
1667 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_tx_map()
1670 netdev_tx_sent_queue(txq, skb->len); in mtk_tx_map()
1673 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_tx_map()
1674 atomic_sub(n_desc, &ring->free_count); in mtk_tx_map()
1681 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_map()
1683 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1687 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size), in mtk_tx_map()
1688 ring->dma_size); in mtk_tx_map()
1696 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); in mtk_tx_map()
1701 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_map()
1702 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_tx_map()
1703 itxd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_tx_map()
1705 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); in mtk_tx_map()
1709 return -ENOMEM; in mtk_tx_map()
1718 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mtk_cal_txd_req()
1719 frag = &skb_shinfo(skb)->frags[i]; in mtk_cal_txd_req()
1721 eth->soc->tx.dma_max_len); in mtk_cal_txd_req()
1724 nfrags += skb_shinfo(skb)->nr_frags; in mtk_cal_txd_req()
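Descriptor budgeting above, with illustrative numbers (the real dma_max_len is SoC-specific; 16383 is only for the arithmetic):

/* A GSO skb with one 20000-byte fragment costs
 *   DIV_ROUND_UP(20000, 16383) == 2 descriptors for that fragment,
 * while the non-GSO path simply charges one descriptor per fragment
 * (nfrags += nr_frags).
 */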
1735 if (!eth->netdev[i]) in mtk_queue_stopped()
1737 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1749 if (!eth->netdev[i]) in mtk_wake_queue()
1751 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1757 struct mtk_mac *mac = netdev_priv(dev); in mtk_start_xmit() local
1758 struct mtk_eth *eth = mac->hw; in mtk_start_xmit()
1759 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_start_xmit()
1760 struct net_device_stats *stats = &dev->stats; in mtk_start_xmit()
1765 !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) { in mtk_start_xmit()
1775 spin_lock(ð->page_lock); in mtk_start_xmit()
1777 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_start_xmit()
1781 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { in mtk_start_xmit()
1785 spin_unlock(ð->page_lock); in mtk_start_xmit()
1797 if (skb_shinfo(skb)->gso_type & in mtk_start_xmit()
1800 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); in mtk_start_xmit()
1807 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) in mtk_start_xmit()
1810 spin_unlock(ð->page_lock); in mtk_start_xmit()
1815 spin_unlock(ð->page_lock); in mtk_start_xmit()
1818 stats->tx_dropped++; in mtk_start_xmit()
1828 if (!eth->hwlro) in mtk_get_rx_ring()
1829 return ð->rx_ring[0]; in mtk_get_rx_ring()
1834 ring = ð->rx_ring[i]; in mtk_get_rx_ring()
1835 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_get_rx_ring()
1836 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_get_rx_ring()
1837 if (rxd->rxd2 & RX_DMA_DONE) { in mtk_get_rx_ring()
1838 ring->calc_idx_update = true; in mtk_get_rx_ring()
1851 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1852 ring = ð->rx_ring[0]; in mtk_update_rx_cpu_idx()
1853 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1856 ring = ð->rx_ring[i]; in mtk_update_rx_cpu_idx()
1857 if (ring->calc_idx_update) { in mtk_update_rx_cpu_idx()
1858 ring->calc_idx_update = false; in mtk_update_rx_cpu_idx()
1859 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1879 .dev = eth->dma_dev, in mtk_create_page_pool()
1886 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1892 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id, in mtk_create_page_pool()
1893 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
1926 if (ring->page_pool) in mtk_rx_put_buff()
1927 page_pool_put_full_page(ring->page_pool, in mtk_rx_put_buff()
1938 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_xdp_frame_map()
1939 struct mtk_mac *mac = netdev_priv(dev); in mtk_xdp_frame_map() local
1943 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1944 txd_info->size, DMA_TO_DEVICE); in mtk_xdp_frame_map()
1945 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1946 return -ENOMEM; in mtk_xdp_frame_map()
1948 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; in mtk_xdp_frame_map()
1952 txd_info->addr = page_pool_get_dma_addr(page) + in mtk_xdp_frame_map()
1954 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1955 txd_info->size, DMA_BIDIRECTIONAL); in mtk_xdp_frame_map()
1959 tx_buf->mac_id = mac->id; in mtk_xdp_frame_map()
1960 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; in mtk_xdp_frame_map()
1961 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; in mtk_xdp_frame_map()
1964 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1974 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1975 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_xdp_submit_frame()
1976 struct mtk_mac *mac = netdev_priv(dev); in mtk_xdp_submit_frame() local
1978 .size = xdpf->len, in mtk_xdp_submit_frame()
1981 .qid = mac->id, in mtk_xdp_submit_frame()
1986 void *data = xdpf->data; in mtk_xdp_submit_frame()
1988 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_xdp_submit_frame()
1989 return -EBUSY; in mtk_xdp_submit_frame()
1991 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in mtk_xdp_submit_frame()
1992 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags)) in mtk_xdp_submit_frame()
1993 return -EBUSY; in mtk_xdp_submit_frame()
1995 spin_lock(ð->page_lock); in mtk_xdp_submit_frame()
1997 txd = ring->next_free; in mtk_xdp_submit_frame()
1998 if (txd == ring->last_free) { in mtk_xdp_submit_frame()
1999 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
2000 return -ENOMEM; in mtk_xdp_submit_frame()
2004 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size); in mtk_xdp_submit_frame()
2010 data, xdpf->headroom, index, dma_map); in mtk_xdp_submit_frame()
2017 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { in mtk_xdp_submit_frame()
2018 txd = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
2019 if (txd == ring->last_free) in mtk_xdp_submit_frame()
2023 soc->tx.desc_size); in mtk_xdp_submit_frame()
2029 txd_info.size = skb_frag_size(&sinfo->frags[index]); in mtk_xdp_submit_frame()
2031 txd_info.qid = mac->id; in mtk_xdp_submit_frame()
2032 data = skb_frag_address(&sinfo->frags[index]); in mtk_xdp_submit_frame()
2037 htx_buf->data = xdpf; in mtk_xdp_submit_frame()
2039 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
2043 txd_pdma->txd2 |= TX_DMA_LS0; in mtk_xdp_submit_frame()
2045 txd_pdma->txd2 |= TX_DMA_LS1; in mtk_xdp_submit_frame()
2048 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); in mtk_xdp_submit_frame()
2049 atomic_sub(n_desc, &ring->free_count); in mtk_xdp_submit_frame()
2056 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
2057 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
2061 idx = txd_to_idx(ring, txd, soc->tx.desc_size); in mtk_xdp_submit_frame()
2062 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
2066 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
2072 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size); in mtk_xdp_submit_frame()
2075 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_xdp_submit_frame()
2076 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_xdp_submit_frame()
2079 txd_pdma->txd2 = TX_DMA_DESP2_DEF; in mtk_xdp_submit_frame()
2082 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); in mtk_xdp_submit_frame()
2085 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
2093 struct mtk_mac *mac = netdev_priv(dev); in mtk_xdp_xmit() local
2094 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_xmit()
2095 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit()
2099 return -EINVAL; in mtk_xdp_xmit()
2107 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_xmit()
2108 hw_stats->xdp_stats.tx_xdp_xmit += nxmit; in mtk_xdp_xmit()
2109 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit; in mtk_xdp_xmit()
2110 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_xmit()
2118 struct mtk_mac *mac = netdev_priv(dev); in mtk_xdp_run() local
2119 struct mtk_hw_stats *hw_stats = mac->hw_stats; in mtk_xdp_run()
2120 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop; in mtk_xdp_run()
2126 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
2133 count = &hw_stats->xdp_stats.rx_xdp_pass; in mtk_xdp_run()
2141 count = &hw_stats->xdp_stats.rx_xdp_redirect; in mtk_xdp_run()
2147 count = &hw_stats->xdp_stats.rx_xdp_tx_errors; in mtk_xdp_run()
2152 count = &hw_stats->xdp_stats.rx_xdp_tx; in mtk_xdp_run()
2165 page_pool_put_full_page(ring->page_pool, in mtk_xdp_run()
2166 virt_to_head_page(xdp->data), true); in mtk_xdp_run()
2169 u64_stats_update_begin(&hw_stats->syncp); in mtk_xdp_run()
2171 u64_stats_update_end(&hw_stats->syncp); in mtk_xdp_run()
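The verdict handling these counters bracket follows the canonical XDP shape; a hedged sketch, not the driver's verbatim code:

u32 act = bpf_prog_run_xdp(prog, xdp);

switch (act) {
case XDP_PASS:
	break;			/* hand the frame to the stack */
case XDP_REDIRECT:
	if (xdp_do_redirect(dev, xdp, prog) < 0)
		goto drop;
	break;
case XDP_TX:
	/* re-queue on our own TX ring */
	break;
default:
drop:
	page_pool_put_full_page(ring->page_pool,
				virt_to_head_page(xdp->data), true);
}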
2197 int mac = 0; in mtk_poll_rx() local
2203 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); in mtk_poll_rx()
2204 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_poll_rx()
2205 data = ring->data[idx]; in mtk_poll_rx()
2210 /* find out which MAC the packet comes from; values start at 1 */ in mtk_poll_rx()
2217 mac = val - 1; in mtk_poll_rx()
2220 mac = MTK_GMAC3_ID; in mtk_poll_rx()
2225 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2227 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; in mtk_poll_rx()
2230 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS || in mtk_poll_rx()
2231 !eth->netdev[mac])) in mtk_poll_rx()
2234 netdev = eth->netdev[mac]; in mtk_poll_rx()
2235 ppe_idx = eth->mac[mac]->ppe_idx; in mtk_poll_rx()
2237 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_poll_rx()
2243 if (ring->page_pool) { in mtk_poll_rx()
2248 new_data = mtk_page_pool_get_buff(ring->page_pool, in mtk_poll_rx()
2252 netdev->stats.rx_dropped++; in mtk_poll_rx()
2256 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2258 pktlen, page_pool_get_dma_dir(ring->page_pool)); in mtk_poll_rx()
2260 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); in mtk_poll_rx()
2274 page_pool_put_full_page(ring->page_pool, in mtk_poll_rx()
2276 netdev->stats.rx_dropped++; in mtk_poll_rx()
2280 skb_reserve(skb, xdp.data - xdp.data_hard_start); in mtk_poll_rx()
2281 skb_put(skb, xdp.data_end - xdp.data); in mtk_poll_rx()
2282 metasize = xdp.data - xdp.data_meta; in mtk_poll_rx()
2287 if (ring->frag_size <= PAGE_SIZE) in mtk_poll_rx()
2288 new_data = napi_alloc_frag(ring->frag_size); in mtk_poll_rx()
2293 netdev->stats.rx_dropped++; in mtk_poll_rx()
2297 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2298 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2299 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
2300 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2303 netdev->stats.rx_dropped++; in mtk_poll_rx()
2307 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2310 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2311 ring->buf_size, DMA_FROM_DEVICE); in mtk_poll_rx()
2313 skb = build_skb(data, ring->frag_size); in mtk_poll_rx()
2315 netdev->stats.rx_dropped++; in mtk_poll_rx()
2324 skb->dev = netdev; in mtk_poll_rx()
2325 bytes += skb->len; in mtk_poll_rx()
2343 if (*rxdcsum & eth->soc->rx.dma_l4_valid) in mtk_poll_rx()
2344 skb->ip_summed = CHECKSUM_UNNECESSARY; in mtk_poll_rx()
2347 skb->protocol = eth_type_trans(skb, netdev); in mtk_poll_rx()
2356 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2357 eth->dsa_meta[port]) in mtk_poll_rx()
2358 skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); in mtk_poll_rx()
2362 mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); in mtk_poll_rx()
2368 ring->data[idx] = new_data; in mtk_poll_rx()
2369 rxd->rxd1 = (unsigned int)dma_addr; in mtk_poll_rx()
2371 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_poll_rx()
2374 rxd->rxd2); in mtk_poll_rx()
2379 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2380 rxd->rxd2 = RX_DMA_LSO; in mtk_poll_rx()
2382 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64; in mtk_poll_rx()
2384 ring->calc_idx = idx; in mtk_poll_rx()
2397 eth->rx_packets += done; in mtk_poll_rx()
2398 eth->rx_bytes += bytes; in mtk_poll_rx()
2399 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2401 net_dim(ð->rx_dim, &dim_sample); in mtk_poll_rx()
2417 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2422 unsigned int bytes = skb->len; in mtk_poll_tx_done()
2424 state->total++; in mtk_poll_tx_done()
2425 eth->tx_packets++; in mtk_poll_tx_done()
2426 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2428 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2433 if (state->txq == txq) { in mtk_poll_tx_done()
2434 state->done++; in mtk_poll_tx_done()
2435 state->bytes += bytes; in mtk_poll_tx_done()
2439 if (state->txq) in mtk_poll_tx_done()
2440 netdev_tx_completed_queue(state->txq, state->done, state->bytes); in mtk_poll_tx_done()
2442 state->txq = txq; in mtk_poll_tx_done()
2443 state->done = 1; in mtk_poll_tx_done()
2444 state->bytes = bytes; in mtk_poll_tx_done()
2450 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2451 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_qdma()
2457 cpu = ring->last_free_ptr; in mtk_poll_tx_qdma()
2458 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2464 u32 next_cpu = desc->txd2; in mtk_poll_tx_qdma()
2466 desc = mtk_qdma_phys_to_virt(ring, desc->txd2); in mtk_poll_tx_qdma()
2467 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) in mtk_poll_tx_qdma()
2471 eth->soc->tx.desc_size); in mtk_poll_tx_qdma()
2472 if (!tx_buf->data) in mtk_poll_tx_qdma()
2475 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_qdma()
2476 if (tx_buf->type == MTK_TYPE_SKB) in mtk_poll_tx_qdma()
2477 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2478 tx_buf->data); in mtk_poll_tx_qdma()
2480 budget--; in mtk_poll_tx_qdma()
2484 ring->last_free = desc; in mtk_poll_tx_qdma()
2485 atomic_inc(&ring->free_count); in mtk_poll_tx_qdma()
2491 ring->last_free_ptr = cpu; in mtk_poll_tx_qdma()
2492 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2500 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_pdma()
2506 cpu = ring->cpu_idx; in mtk_poll_tx_pdma()
2511 tx_buf = &ring->buf[cpu]; in mtk_poll_tx_pdma()
2512 if (!tx_buf->data) in mtk_poll_tx_pdma()
2515 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { in mtk_poll_tx_pdma()
2516 if (tx_buf->type == MTK_TYPE_SKB) in mtk_poll_tx_pdma()
2517 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2518 budget--; in mtk_poll_tx_pdma()
2522 desc = ring->dma + cpu * eth->soc->tx.desc_size; in mtk_poll_tx_pdma()
2523 ring->last_free = desc; in mtk_poll_tx_pdma()
2524 atomic_inc(&ring->free_count); in mtk_poll_tx_pdma()
2526 cpu = NEXT_DESP_IDX(cpu, ring->dma_size); in mtk_poll_tx_pdma()
2530 ring->cpu_idx = cpu; in mtk_poll_tx_pdma()
2537 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx()
2541 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2549 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2551 net_dim(ð->tx_dim, &dim_sample); in mtk_poll_tx()
2554 (atomic_read(&ring->free_count) > ring->thresh)) in mtk_poll_tx()
2574 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2577 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2579 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2583 dev_info(eth->dev, in mtk_napi_tx()
2585 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2586 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2592 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2604 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2612 mtk_w32(eth, eth->soc->rx.irq_done_mask, in mtk_napi_rx()
2613 reg_map->pdma.irq_status); in mtk_napi_rx()
2614 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2618 dev_info(eth->dev, in mtk_napi_rx()
2620 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2621 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2627 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2628 eth->soc->rx.irq_done_mask); in mtk_napi_rx()
2631 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_napi_rx()
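The loop structure above, condensed:

/* Ack the RX interrupt, poll up to the remaining budget, and repeat
 * while the status register still reports completed descriptors;
 * only when a pass finishes under budget is napi_complete_done()
 * reached and the interrupt re-enabled, closing the race between
 * the last poll and new completions.
 */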
2638 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2639 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_alloc()
2640 int i, sz = soc->tx.desc_size; in mtk_tx_alloc()
2645 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) in mtk_tx_alloc()
2648 ring_size = soc->tx.dma_size; in mtk_tx_alloc()
2650 ring->buf = kcalloc(ring_size, sizeof(*ring->buf), in mtk_tx_alloc()
2652 if (!ring->buf) in mtk_tx_alloc()
2655 ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true); in mtk_tx_alloc()
2656 if (!ring->dma) in mtk_tx_alloc()
2661 u32 next_ptr = ring->phys + next * sz; in mtk_tx_alloc()
2663 txd = ring->dma + i * sz; in mtk_tx_alloc()
2664 txd->txd2 = next_ptr; in mtk_tx_alloc()
2665 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; in mtk_tx_alloc()
2666 txd->txd4 = 0; in mtk_tx_alloc()
2668 txd->txd5 = 0; in mtk_tx_alloc()
2669 txd->txd6 = 0; in mtk_tx_alloc()
2670 txd->txd7 = 0; in mtk_tx_alloc()
2671 txd->txd8 = 0; in mtk_tx_alloc()
2675 /* On MT7688 (PDMA only) this driver uses the ring->dma structs in mtk_tx_alloc()
2677 * descriptors in ring->dma_pdma. in mtk_tx_alloc()
2679 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2680 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2681 &ring->phys_pdma, GFP_KERNEL); in mtk_tx_alloc()
2682 if (!ring->dma_pdma) in mtk_tx_alloc()
2686 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; in mtk_tx_alloc()
2687 ring->dma_pdma[i].txd4 = 0; in mtk_tx_alloc()
2691 ring->dma_size = ring_size; in mtk_tx_alloc()
2692 atomic_set(&ring->free_count, ring_size - 2); in mtk_tx_alloc()
2693 ring->next_free = ring->dma; in mtk_tx_alloc()
2694 ring->last_free = (void *)txd; in mtk_tx_alloc()
2695 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz)); in mtk_tx_alloc()
2696 ring->thresh = MAX_SKB_FRAGS; in mtk_tx_alloc()
2703 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
2704 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2705 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2707 ring->phys + ((ring_size - 1) * sz), in mtk_tx_alloc()
2708 soc->reg_map->qdma.crx_ptr); in mtk_tx_alloc()
2709 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2713 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2722 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2726 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2728 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2730 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2733 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2739 return -ENOMEM; in mtk_tx_alloc()
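The chain the init loop above builds (NEXT_DESP_IDX is the ring's wrap-around increment):

/* Descriptor i's txd2 holds the DMA address of its successor:
 *   next_ptr = ring->phys + ((i + 1) % ring_size) * sz;
 * so the final descriptor points back at descriptor 0 and the
 * hardware walks a closed circle.
 */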
2744 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2745 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_clean()
2748 if (ring->buf) { in mtk_tx_clean()
2749 for (i = 0; i < ring->dma_size; i++) in mtk_tx_clean()
2750 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2751 kfree(ring->buf); in mtk_tx_clean()
2752 ring->buf = NULL; in mtk_tx_clean()
2755 if (ring->dma) { in mtk_tx_clean()
2756 mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size, in mtk_tx_clean()
2757 ring->dma, ring->phys, true); in mtk_tx_clean()
2758 ring->dma = NULL; in mtk_tx_clean()
2761 if (ring->dma_pdma) { in mtk_tx_clean()
2762 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2763 ring->dma_size * soc->tx.desc_size, in mtk_tx_clean()
2764 ring->dma_pdma, ring->phys_pdma); in mtk_tx_clean()
2765 ring->dma_pdma = NULL; in mtk_tx_clean()
2771 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2772 const struct mtk_soc_data *soc = eth->soc; in mtk_rx_alloc()
2779 return -EINVAL; in mtk_rx_alloc()
2780 ring = ð->rx_ring_qdma; in mtk_rx_alloc()
2782 ring = ð->rx_ring[ring_no]; in mtk_rx_alloc()
2790 rx_dma_size = soc->rx.dma_size; in mtk_rx_alloc()
2793 ring->frag_size = mtk_max_frag_size(rx_data_len); in mtk_rx_alloc()
2794 ring->buf_size = mtk_max_buf_size(ring->frag_size); in mtk_rx_alloc()
2795 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), in mtk_rx_alloc()
2797 if (!ring->data) in mtk_rx_alloc()
2798 return -ENOMEM; in mtk_rx_alloc()
2803 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2808 ring->page_pool = pp; in mtk_rx_alloc()
2811 ring->dma = mtk_dma_ring_alloc(eth, in mtk_rx_alloc()
2812 rx_dma_size * eth->soc->rx.desc_size, in mtk_rx_alloc()
2813 &ring->phys, in mtk_rx_alloc()
2815 if (!ring->dma) in mtk_rx_alloc()
2816 return -ENOMEM; in mtk_rx_alloc()
2823 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_alloc()
2824 if (ring->page_pool) { in mtk_rx_alloc()
2825 data = mtk_page_pool_get_buff(ring->page_pool, in mtk_rx_alloc()
2828 return -ENOMEM; in mtk_rx_alloc()
2830 if (ring->frag_size <= PAGE_SIZE) in mtk_rx_alloc()
2831 data = netdev_alloc_frag(ring->frag_size); in mtk_rx_alloc()
2836 return -ENOMEM; in mtk_rx_alloc()
2838 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2839 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2840 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_alloc()
2841 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2844 return -ENOMEM; in mtk_rx_alloc()
2847 rxd->rxd1 = (unsigned int)dma_addr; in mtk_rx_alloc()
2848 ring->data[i] = data; in mtk_rx_alloc()
2850 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2851 rxd->rxd2 = RX_DMA_LSO; in mtk_rx_alloc()
2853 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); in mtk_rx_alloc()
2855 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2856 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); in mtk_rx_alloc()
2858 rxd->rxd3 = 0; in mtk_rx_alloc()
2859 rxd->rxd4 = 0; in mtk_rx_alloc()
2861 rxd->rxd5 = 0; in mtk_rx_alloc()
2862 rxd->rxd6 = 0; in mtk_rx_alloc()
2863 rxd->rxd7 = 0; in mtk_rx_alloc()
2864 rxd->rxd8 = 0; in mtk_rx_alloc()
2868 ring->dma_size = rx_dma_size; in mtk_rx_alloc()
2869 ring->calc_idx_update = false; in mtk_rx_alloc()
2870 ring->calc_idx = rx_dma_size - 1; in mtk_rx_alloc()
2872 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + in mtk_rx_alloc()
2875 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + in mtk_rx_alloc()
2883 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2884 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2886 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2888 reg_map->qdma.rst_idx); in mtk_rx_alloc()
2890 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2891 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2893 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); in mtk_rx_alloc()
2895 reg_map->pdma.rst_idx); in mtk_rx_alloc()
2897 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2907 if (ring->data && ring->dma) { in mtk_rx_clean()
2908 for (i = 0; i < ring->dma_size; i++) { in mtk_rx_clean()
2911 if (!ring->data[i]) in mtk_rx_clean()
2914 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_clean()
2915 if (!rxd->rxd1) in mtk_rx_clean()
2918 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2919 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2); in mtk_rx_clean()
2921 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2922 ring->buf_size, DMA_FROM_DEVICE); in mtk_rx_clean()
2923 mtk_rx_put_buff(ring, ring->data[i], false); in mtk_rx_clean()
2925 kfree(ring->data); in mtk_rx_clean()
2926 ring->data = NULL; in mtk_rx_clean()
2929 if (ring->dma) { in mtk_rx_clean()
2930 mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size, in mtk_rx_clean()
2931 ring->dma, ring->phys, in_sram); in mtk_rx_clean()
2932 ring->dma = NULL; in mtk_rx_clean()
2935 if (ring->page_pool) { in mtk_rx_clean()
2936 if (xdp_rxq_info_is_reg(&ring->xdp_q)) in mtk_rx_clean()
2937 xdp_rxq_info_unreg(&ring->xdp_q); in mtk_rx_clean()
2938 page_pool_destroy(ring->page_pool); in mtk_rx_clean()
2939 ring->page_pool = NULL; in mtk_rx_clean()
2949 /* set LRO rings to auto-learn modes */ in mtk_hwlro_rx_init()
2981 /* auto-learn score delta setting */ in mtk_hwlro_rx_init()
3056 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) in mtk_hwlro_get_ip_cnt() argument
3062 if (mac->hwlro_ip[i]) in mtk_hwlro_get_ip_cnt()
3073 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_add_ipaddr()
3074 struct mtk_mac *mac = netdev_priv(dev); in mtk_hwlro_add_ipaddr() local
3075 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr()
3078 if ((fsp->flow_type != TCP_V4_FLOW) || in mtk_hwlro_add_ipaddr()
3079 (!fsp->h_u.tcp_ip4_spec.ip4dst) || in mtk_hwlro_add_ipaddr()
3080 (fsp->location > 1)) in mtk_hwlro_add_ipaddr()
3081 return -EINVAL; in mtk_hwlro_add_ipaddr()
3083 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); in mtk_hwlro_add_ipaddr()
3084 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_add_ipaddr()
3086 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_add_ipaddr()
3088 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
3097 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_del_ipaddr()
3098 struct mtk_mac *mac = netdev_priv(dev); in mtk_hwlro_del_ipaddr() local
3099 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr()
3102 if (fsp->location > 1) in mtk_hwlro_del_ipaddr()
3103 return -EINVAL; in mtk_hwlro_del_ipaddr()
3105 mac->hwlro_ip[fsp->location] = 0; in mtk_hwlro_del_ipaddr()
3106 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; in mtk_hwlro_del_ipaddr()
3108 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_hwlro_del_ipaddr()
3117 struct mtk_mac *mac = netdev_priv(dev); in mtk_hwlro_netdev_disable() local
3118 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable()
3122 mac->hwlro_ip[i] = 0; in mtk_hwlro_netdev_disable()
3123 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; in mtk_hwlro_netdev_disable()
3128 mac->hwlro_ip_cnt = 0; in mtk_hwlro_netdev_disable()
3134 struct mtk_mac *mac = netdev_priv(dev); in mtk_hwlro_get_fdir_entry() local
3136 (struct ethtool_rx_flow_spec *)&cmd->fs; in mtk_hwlro_get_fdir_entry()
3138 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip)) in mtk_hwlro_get_fdir_entry()
3139 return -EINVAL; in mtk_hwlro_get_fdir_entry()
3142 fsp->flow_type = TCP_V4_FLOW; in mtk_hwlro_get_fdir_entry()
3143 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); in mtk_hwlro_get_fdir_entry()
3144 fsp->m_u.tcp_ip4_spec.ip4dst = 0; in mtk_hwlro_get_fdir_entry()
3146 fsp->h_u.tcp_ip4_spec.ip4src = 0; in mtk_hwlro_get_fdir_entry()
3147 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; in mtk_hwlro_get_fdir_entry()
3148 fsp->h_u.tcp_ip4_spec.psrc = 0; in mtk_hwlro_get_fdir_entry()
3149 fsp->m_u.tcp_ip4_spec.psrc = 0xffff; in mtk_hwlro_get_fdir_entry()
3150 fsp->h_u.tcp_ip4_spec.pdst = 0; in mtk_hwlro_get_fdir_entry()
3151 fsp->m_u.tcp_ip4_spec.pdst = 0xffff; in mtk_hwlro_get_fdir_entry()
3152 fsp->h_u.tcp_ip4_spec.tos = 0; in mtk_hwlro_get_fdir_entry()
3153 fsp->m_u.tcp_ip4_spec.tos = 0xff; in mtk_hwlro_get_fdir_entry()
3162 struct mtk_mac *mac = netdev_priv(dev); in mtk_hwlro_get_fdir_all() local
3167 if (cnt == cmd->rule_cnt) in mtk_hwlro_get_fdir_all()
3168 return -EMSGSIZE; in mtk_hwlro_get_fdir_all()
3170 if (mac->hwlro_ip[i]) { in mtk_hwlro_get_fdir_all()
3176 cmd->rule_cnt = cnt; in mtk_hwlro_get_fdir_all()
3185 struct mtk_mac *mac = netdev_priv(dev); in mtk_fix_features() local
3186 int ip_cnt = mtk_hwlro_get_ip_cnt(mac); in mtk_fix_features()
3200 netdev_features_t diff = dev->features ^ features; in mtk_set_features()
3215 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3216 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3218 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3220 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3224 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
3235 return -EBUSY; in mtk_dma_init()
3237 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3250 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3260 if (eth->hwlro) { in mtk_dma_init()
3271 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3276 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3277 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3285 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3288 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_free()
3292 if (!eth->netdev[i]) in mtk_dma_free()
3296 netdev_tx_reset_subqueue(eth->netdev[i], j); in mtk_dma_free()
3299 if (eth->scratch_ring) { in mtk_dma_free()
3300 mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size, in mtk_dma_free()
3301 eth->scratch_ring, eth->phy_scratch_ring, in mtk_dma_free()
3303 eth->scratch_ring = NULL; in mtk_dma_free()
3304 eth->phy_scratch_ring = 0; in mtk_dma_free()
3308 mtk_rx_clean(eth, ð->rx_ring[0], true); in mtk_dma_free()
3309 mtk_rx_clean(eth, ð->rx_ring_qdma, false); in mtk_dma_free()
3311 if (eth->hwlro) { in mtk_dma_free()
3314 mtk_rx_clean(eth, ð->rx_ring[i], false); in mtk_dma_free()
3317 for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) { in mtk_dma_free()
3318 kfree(eth->scratch_head[i]); in mtk_dma_free()
3319 eth->scratch_head[i] = NULL; in mtk_dma_free()
3334 struct mtk_mac *mac = netdev_priv(dev); in mtk_tx_timeout() local
3335 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout()
3337 if (test_bit(MTK_RESETTING, ð->state)) in mtk_tx_timeout()
3343 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3346 schedule_work(ð->pending_work); in mtk_tx_timeout()
3354 eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1"); in mtk_get_irqs()
3355 eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2"); in mtk_get_irqs()
3356 if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0) in mtk_get_irqs()
3359 /* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */ in mtk_get_irqs()
3360 if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO) in mtk_get_irqs()
3361 return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX], in mtk_get_irqs()
3364 if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO) in mtk_get_irqs()
3365 return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX], in mtk_get_irqs()
3368 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) in mtk_get_irqs()
3369 dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.\n"); in mtk_get_irqs()

3373 * from devicetree and used for both RX and TX - it is shared. in mtk_get_irqs()
3374 * On SoCs with non-shared IRQs the first entry is not used, in mtk_get_irqs()
3378 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_get_irqs()
3380 eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i); in mtk_get_irqs()
3382 eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED]; in mtk_get_irqs()
3384 eth->irq[i] = platform_get_irq(pdev, i + 1); in mtk_get_irqs()
3387 if (eth->irq[i] < 0) { in mtk_get_irqs()
3388 dev_err(&pdev->dev, "no IRQ%d resource found\n", i); in mtk_get_irqs()
3389 return -ENXIO; in mtk_get_irqs()
3400 eth->rx_events++; in mtk_handle_irq_rx()
3401 if (likely(napi_schedule_prep(ð->rx_napi))) { in mtk_handle_irq_rx()
3402 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_handle_irq_rx()
3403 __napi_schedule(ð->rx_napi); in mtk_handle_irq_rx()
3413 eth->tx_events++; in mtk_handle_irq_tx()
3414 if (likely(napi_schedule_prep(ð->tx_napi))) { in mtk_handle_irq_tx()
3416 __napi_schedule(ð->tx_napi); in mtk_handle_irq_tx()
3425 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3427 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3428 eth->soc->rx.irq_done_mask) { in mtk_handle_irq()
3429 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3430 eth->soc->rx.irq_done_mask) in mtk_handle_irq()
3433 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3434 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3444 struct mtk_mac *mac = netdev_priv(dev); in mtk_poll_controller() local
3445 struct mtk_eth *eth = mac->hw; in mtk_poll_controller()
3448 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3449 mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev); in mtk_poll_controller()
3451 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3458 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3467 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3468 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3479 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3484 reg_map->pdma.glo_cfg); in mtk_start_dma()
3488 reg_map->pdma.glo_cfg); in mtk_start_dma()
3498 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3511 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) in mtk_gdm_config()
3522 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK; in mtk_uses_dsa()
3530 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier); in mtk_device_event() local
3531 struct mtk_eth *eth = mac->hw; in mtk_device_event()
3542 if (netdev_priv(ldev) == mac) in mtk_device_event()
3555 if (s.base.speed == 0 || s.base.speed == ((__u32)-1)) in mtk_device_event()
3559 if (dp->index >= MTK_QDMA_NUM_QUEUES) in mtk_device_event()
3562 if (mac->speed > 0 && mac->speed <= s.base.speed) in mtk_device_event()
3565 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3572 struct mtk_mac *mac = netdev_priv(dev); in mtk_open() local
3573 struct mtk_eth *eth = mac->hw; in mtk_open()
3577 ppe_num = eth->soc->ppe_num; in mtk_open()
3579 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); in mtk_open()
3587 if (!refcount_read(ð->dma_refcnt)) { in mtk_open()
3588 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3594 phylink_disconnect_phy(mac->phylink); in mtk_open()
3598 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3599 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3602 if (!eth->netdev[i]) in mtk_open()
3605 target_mac = netdev_priv(eth->netdev[i]); in mtk_open()
3606 if (!soc->offload_version) { in mtk_open()
3607 target_mac->ppe_idx = 0; in mtk_open()
3609 } else if (ppe_num >= 3 && target_mac->id == 2) { in mtk_open()
3610 target_mac->ppe_idx = 2; in mtk_open()
3611 gdm_config = soc->reg_map->gdma_to_ppe[2]; in mtk_open()
3612 } else if (ppe_num >= 2 && target_mac->id == 1) { in mtk_open()
3613 target_mac->ppe_idx = 1; in mtk_open()
3614 gdm_config = soc->reg_map->gdma_to_ppe[1]; in mtk_open()
3616 target_mac->ppe_idx = 0; in mtk_open()
3617 gdm_config = soc->reg_map->gdma_to_ppe[0]; in mtk_open()
3619 mtk_gdm_config(eth, target_mac->id, gdm_config); in mtk_open()
3622 napi_enable(ð->tx_napi); in mtk_open()
3623 napi_enable(ð->rx_napi); in mtk_open()
3625 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); in mtk_open()
3626 refcount_set(ð->dma_refcnt, 1); in mtk_open()
3628 refcount_inc(ð->dma_refcnt); in mtk_open()
3631 phylink_start(mac->phylink); in mtk_open()
3637 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3638 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3639 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3647 return -ENOMEM; in mtk_open()
3649 md_dst->u.port_info.port_id = i; in mtk_open()
3650 eth->dsa_meta[i] = md_dst; in mtk_open()
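
The allocation that sits between the eth->dsa_meta[i] lookup and the -ENOMEM return is outside the match; presumably it uses the generic metadata-dst API with per-port hardware muxing, something like:

	if (md_dst)
		continue;	/* slot already populated */

	/* assumed: standard per-port HW mux metadata, as DSA conduits use */
	md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
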
3654 * disabled if at least one MAC does not use DSA. in mtk_open()
3673 spin_lock_bh(ð->page_lock); in mtk_stop_dma()
3677 spin_unlock_bh(ð->page_lock); in mtk_stop_dma()
3692 struct mtk_mac *mac = netdev_priv(dev); in mtk_stop() local
3693 struct mtk_eth *eth = mac->hw; in mtk_stop()
3696 phylink_stop(mac->phylink); in mtk_stop()
3700 phylink_disconnect_phy(mac->phylink); in mtk_stop()
3703 if (!refcount_dec_and_test(ð->dma_refcnt)) in mtk_stop()
3710 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_stop()
3711 napi_disable(ð->tx_napi); in mtk_stop()
3712 napi_disable(ð->rx_napi); in mtk_stop()
3714 cancel_work_sync(ð->rx_dim.work); in mtk_stop()
3715 cancel_work_sync(ð->tx_dim.work); in mtk_stop()
3717 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3718 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3719 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3723 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3724 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3732 struct mtk_mac *mac = netdev_priv(dev); in mtk_xdp_setup() local
3733 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup()
3737 if (eth->hwlro) { in mtk_xdp_setup()
3739 return -EOPNOTSUPP; in mtk_xdp_setup()
3742 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) { in mtk_xdp_setup()
3744 return -EOPNOTSUPP; in mtk_xdp_setup()
3747 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3751 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3763 switch (xdp->command) { in mtk_xdp()
3765 return mtk_xdp_setup(dev, xdp->prog, xdp->extack); in mtk_xdp()
3767 return -EINVAL; in mtk_xdp()
3773 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3778 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3788 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) in mtk_clk_disable()
3789 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3797 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3805 while (--clk >= 0) in mtk_clk_enable()
3806 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
3815 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3819 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3820 dim->profile_ix); in mtk_dim_rx()
3821 spin_lock_bh(ð->dim_lock); in mtk_dim_rx()
3823 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3833 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3834 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3835 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3837 spin_unlock_bh(ð->dim_lock); in mtk_dim_rx()
3839 dim->state = DIM_START_MEASURE; in mtk_dim_rx()
3846 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3850 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3851 dim->profile_ix); in mtk_dim_tx()
3852 spin_lock_bh(ð->dim_lock); in mtk_dim_tx()
3854 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3864 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3865 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3866 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3868 spin_unlock_bh(ð->dim_lock); in mtk_dim_tx()
3870 dim->state = DIM_START_MEASURE; in mtk_dim_tx()
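
mtk_dim_rx()/mtk_dim_tx() are the work-side halves of the DIM loop; the feeding side is elided here. A sketch of how the NAPI poll path presumably drives it, assuming the generic net_dim flow and the eth->rx_events/rx_packets/rx_bytes counters (rx_events appears in mtk_handle_irq_rx() above):

	/* sketch: fold the accumulated counters into a sample and let
	 * net_dim() decide whether to schedule eth->rx_dim.work, i.e.
	 * mtk_dim_rx() above
	 */
	struct dim_sample dim_sample = {};

	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
			  &dim_sample);
	net_dim(&eth->rx_dim, &dim_sample);
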
3873 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) in mtk_set_mcr_max_rx() argument
3875 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx()
3878 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3881 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); in mtk_set_mcr_max_rx()
3894 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); in mtk_set_mcr_max_rx()
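
Between the mcr_cur read and the mcr_new write, the matcher drops the field update. A sketch of the likely read-modify-write, assuming MAC_MCR_MAX_RX_MASK-style helpers and that the hardware encodes a max-RX size class rather than a byte count:

	/* assumed field names; the length is bucketed into a size class */
	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
	if (val <= 1518)
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
	else
		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
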
3902 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3907 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3910 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3917 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3926 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3929 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3937 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3945 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3949 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3956 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3958 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3964 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3970 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3975 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3979 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3984 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3990 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3998 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
4002 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
4004 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
4007 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
4010 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
4011 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
4012 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
4014 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
4015 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
4016 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
4023 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
4024 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
4030 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
4031 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
4036 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
4037 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
4044 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
4046 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
4047 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
4050 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
4051 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
4057 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
4058 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
4059 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
4061 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
4072 if (test_bit(MTK_RESETTING, ð->state)) in mtk_hw_reset_monitor_work()
4077 schedule_work(ð->pending_work); in mtk_hw_reset_monitor_work()
4080 schedule_delayed_work(ð->reset.monitor_work, in mtk_hw_reset_monitor_work()
4088 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
4091 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state)) in mtk_hw_init()
4095 pm_runtime_enable(eth->dev); in mtk_hw_init()
4096 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
4103 if (eth->ethsys) in mtk_hw_init()
4104 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
4105 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
4107 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
4108 ret = device_reset(eth->dev); in mtk_hw_init()
4110 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
4115 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
4116 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
4133 if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_init()
4142 if (eth->pctl) { in mtk_hw_init()
4144 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
4147 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
4150 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
4158 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
4165 dev->mtu + MTK_RX_ETH_HLEN); in mtk_hw_init()
4181 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
4182 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
4189 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
4190 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
4191 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
4192 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
4227 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
4271 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4272 pm_runtime_disable(eth->dev); in mtk_hw_init()
4280 if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) in mtk_hw_deinit()
4285 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4286 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
4293 struct mtk_mac *mac = netdev_priv(dev); in mtk_uninit() local
4294 struct mtk_eth *eth = mac->hw; in mtk_uninit()
4296 phylink_disconnect_phy(mac->phylink); in mtk_uninit()
4304 struct mtk_mac *mac = netdev_priv(dev); in mtk_change_mtu() local
4305 struct mtk_eth *eth = mac->hw; in mtk_change_mtu()
4307 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4310 return -EINVAL; in mtk_change_mtu()
4313 mtk_set_mcr_max_rx(mac, length); in mtk_change_mtu()
4314 WRITE_ONCE(dev->mtu, new_mtu); in mtk_change_mtu()
4321 struct mtk_mac *mac = netdev_priv(dev); in mtk_do_ioctl() local
4327 return phylink_mii_ioctl(mac->phylink, ifr, cmd); in mtk_do_ioctl()
4332 return -EOPNOTSUPP; in mtk_do_ioctl()
4345 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4347 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4353 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4354 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4374 set_bit(MTK_RESETTING, ð->state); in mtk_pending_work()
4385 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4388 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4394 if (eth->dev->pins) in mtk_pending_work()
4395 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4396 eth->dev->pins->default_state); in mtk_pending_work()
4401 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4404 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4405 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4407 dev_close(eth->netdev[i]); in mtk_pending_work()
4416 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4418 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4424 clear_bit(MTK_RESETTING, ð->state); in mtk_pending_work()
4436 if (!eth->netdev[i]) in mtk_free_dev()
4438 free_netdev(eth->netdev[i]); in mtk_free_dev()
4441 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4442 if (!eth->dsa_meta[i]) in mtk_free_dev()
4444 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4455 struct mtk_mac *mac; in mtk_unreg_dev() local
4456 if (!eth->netdev[i]) in mtk_unreg_dev()
4458 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4459 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4460 unregister_netdevice_notifier(&mac->device_notifier); in mtk_unreg_dev()
4461 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4472 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4480 cancel_work_sync(ð->pending_work); in mtk_cleanup()
4481 cancel_delayed_work_sync(ð->reset.monitor_work); in mtk_cleanup()
4489 struct mtk_mac *mac = netdev_priv(ndev); in mtk_get_link_ksettings() local
4491 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_link_ksettings()
4492 return -EBUSY; in mtk_get_link_ksettings()
4494 return phylink_ethtool_ksettings_get(mac->phylink, cmd); in mtk_get_link_ksettings()
4500 struct mtk_mac *mac = netdev_priv(ndev); in mtk_set_link_ksettings() local
4502 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_set_link_ksettings()
4503 return -EBUSY; in mtk_set_link_ksettings()
4505 return phylink_ethtool_ksettings_set(mac->phylink, cmd); in mtk_set_link_ksettings()
4511 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_drvinfo() local
4513 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); in mtk_get_drvinfo()
4514 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); in mtk_get_drvinfo()
4515 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); in mtk_get_drvinfo()
4520 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_msglevel() local
4522 return mac->hw->msg_enable; in mtk_get_msglevel()
4527 struct mtk_mac *mac = netdev_priv(dev); in mtk_set_msglevel() local
4529 mac->hw->msg_enable = value; in mtk_set_msglevel()
4534 struct mtk_mac *mac = netdev_priv(dev); in mtk_nway_reset() local
4536 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_nway_reset()
4537 return -EBUSY; in mtk_nway_reset()
4539 if (!mac->phylink) in mtk_nway_reset()
4540 return -EOPNOTSUPP; in mtk_nway_reset()
4542 return phylink_ethtool_nway_reset(mac->phylink); in mtk_nway_reset()
4551 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_strings() local
4555 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_strings()
4569 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_sset_count() local
4571 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_sset_count()
4576 return -EOPNOTSUPP; in mtk_get_sset_count()
4585 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4586 struct mtk_rx_ring *ring = ð->rx_ring[i]; in mtk_ethtool_pp_stats()
4588 if (!ring->page_pool) in mtk_ethtool_pp_stats()
4591 page_pool_get_stats(ring->page_pool, &stats); in mtk_ethtool_pp_stats()
4599 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_ethtool_stats() local
4600 struct mtk_hw_stats *hwstats = mac->hw_stats; in mtk_get_ethtool_stats()
4605 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) in mtk_get_ethtool_stats()
4609 if (spin_trylock_bh(&hwstats->stats_lock)) { in mtk_get_ethtool_stats()
4610 mtk_stats_update_mac(mac); in mtk_get_ethtool_stats()
4611 spin_unlock_bh(&hwstats->stats_lock); in mtk_get_ethtool_stats()
4619 start = u64_stats_fetch_begin(&hwstats->syncp); in mtk_get_ethtool_stats()
4623 if (mtk_page_pool_enabled(mac->hw)) in mtk_get_ethtool_stats()
4624 mtk_ethtool_pp_stats(mac->hw, data_dst); in mtk_get_ethtool_stats()
4625 } while (u64_stats_fetch_retry(&hwstats->syncp, start)); in mtk_get_ethtool_stats()
4631 int ret = -EOPNOTSUPP; in mtk_get_rxnfc()
4633 switch (cmd->cmd) { in mtk_get_rxnfc()
4635 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
4636 cmd->data = MTK_MAX_RX_RING_NUM; in mtk_get_rxnfc()
4641 if (dev->hw_features & NETIF_F_LRO) { in mtk_get_rxnfc()
4642 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_rxnfc() local
4644 cmd->rule_cnt = mac->hwlro_ip_cnt; in mtk_get_rxnfc()
4649 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
4653 if (dev->hw_features & NETIF_F_LRO) in mtk_get_rxnfc()
4666 int ret = -EOPNOTSUPP; in mtk_set_rxnfc()
4668 switch (cmd->cmd) { in mtk_set_rxnfc()
4670 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
4674 if (dev->hw_features & NETIF_F_LRO) in mtk_set_rxnfc()
4686 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_pauseparam() local
4688 phylink_ethtool_get_pauseparam(mac->phylink, pause); in mtk_get_pauseparam()
4693 struct mtk_mac *mac = netdev_priv(dev); in mtk_set_pauseparam() local
4695 return phylink_ethtool_set_pauseparam(mac->phylink, pause); in mtk_set_pauseparam()
4700 struct mtk_mac *mac = netdev_priv(dev); in mtk_get_eee() local
4702 return phylink_ethtool_get_eee(mac->phylink, eee); in mtk_get_eee()
4707 struct mtk_mac *mac = netdev_priv(dev); in mtk_set_eee() local
4709 return phylink_ethtool_set_eee(mac->phylink, eee); in mtk_set_eee()
4715 struct mtk_mac *mac = netdev_priv(dev); in mtk_select_queue() local
4721 queue = mac->id; in mtk_select_queue()
4723 if (queue >= dev->num_tx_queues) in mtk_select_queue()
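
Only the non-DSA branch and the bounds check of mtk_select_queue() are matched. Since mtk_device_event() above programs per-port shaping on queue dp->index + 3, the elided branch presumably maps DSA-tagged traffic the same way; a sketch:

	/* sketch: the DSA branch is inferred from the dp->index + 3
	 * mapping in mtk_device_event()
	 */
	if (netdev_uses_dsa(dev))
		queue = skb_get_queue_mapping(skb) + 3;
	else
		queue = mac->id;

	if (queue >= dev->num_tx_queues)
		queue = 0;
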
4775 struct mtk_mac *mac; in mtk_add_mac() local
4781 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4782 return -EINVAL; in mtk_add_mac()
4787 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4788 return -EINVAL; in mtk_add_mac()
4791 if (eth->netdev[id]) { in mtk_add_mac()
4792 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4793 return -EINVAL; in mtk_add_mac()
4796 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4799 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4800 if (!eth->netdev[id]) { in mtk_add_mac()
4801 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4802 return -ENOMEM; in mtk_add_mac()
4804 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4805 eth->mac[id] = mac; in mtk_add_mac()
4806 mac->id = id; in mtk_add_mac()
4807 mac->hw = eth; in mtk_add_mac()
4808 mac->of_node = np; in mtk_add_mac()
4810 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4811 if (err == -EPROBE_DEFER) in mtk_add_mac()
4815 /* If the MAC address is invalid, use a random MAC address */ in mtk_add_mac()
4816 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4817 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4818 eth->netdev[id]->dev_addr); in mtk_add_mac()
4821 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); in mtk_add_mac()
4822 mac->hwlro_ip_cnt = 0; in mtk_add_mac()
4824 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4825 sizeof(*mac->hw_stats), in mtk_add_mac()
4827 if (!mac->hw_stats) { in mtk_add_mac()
4828 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4829 err = -ENOMEM; in mtk_add_mac()
4832 spin_lock_init(&mac->hw_stats->stats_lock); in mtk_add_mac()
4833 u64_stats_init(&mac->hw_stats->syncp); in mtk_add_mac()
4836 mac->hw_stats->reg_offset = id * 0x80; in mtk_add_mac()
4838 mac->hw_stats->reg_offset = id * 0x40; in mtk_add_mac()
4843 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4847 /* mac config is not set */ in mtk_add_mac()
4848 mac->interface = PHY_INTERFACE_MODE_NA; in mtk_add_mac()
4849 mac->speed = SPEED_UNKNOWN; in mtk_add_mac()
4851 mac->phylink_config.dev = ð->netdev[id]->dev; in mtk_add_mac()
4852 mac->phylink_config.type = PHYLINK_NETDEV; in mtk_add_mac()
4853 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | in mtk_add_mac()
4855 mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD | in mtk_add_mac()
4857 mac->phylink_config.lpi_timer_default = 1000; in mtk_add_mac()
4859 /* MT7623 gmac0 is now missing its speed-specific PLL configuration in mtk_add_mac()
4860 * in its .mac_config method (since state->speed is not valid there). in mtk_add_mac()
4863 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) { in mtk_add_mac()
4865 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4867 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4869 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) in mtk_add_mac()
4870 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); in mtk_add_mac()
4873 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) in mtk_add_mac()
4875 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4878 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && in mtk_add_mac()
4879 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) { in mtk_add_mac()
4880 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4883 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4886 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { in mtk_add_mac()
4888 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4890 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4892 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4895 if (mtk_is_netsys_v3_or_greater(mac->hw) && in mtk_add_mac()
4896 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) && in mtk_add_mac()
4898 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | in mtk_add_mac()
4901 phy_interface_zero(mac->phylink_config.supported_interfaces); in mtk_add_mac()
4903 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4906 phylink = phylink_create(&mac->phylink_config, in mtk_add_mac()
4907 of_fwnode_handle(mac->of_node), in mtk_add_mac()
4914 mac->phylink = phylink; in mtk_add_mac()
4916 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) && in mtk_add_mac()
4919 mac->phylink_config.supported_interfaces); in mtk_add_mac()
4921 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4922 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4923 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4924 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4926 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4927 if (eth->hwlro) in mtk_add_mac()
4928 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4930 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4932 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4933 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4935 eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED]; in mtk_add_mac()
4936 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4938 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4939 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4941 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4943 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4944 mac->device_notifier.notifier_call = mtk_device_event; in mtk_add_mac()
4945 register_netdevice_notifier(&mac->device_notifier); in mtk_add_mac()
4949 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4957 free_netdev(eth->netdev[id]); in mtk_add_mac()
4970 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4972 if (!dev || !(dev->flags & IFF_UP)) in mtk_eth_set_dma_device()
4975 list_add_tail(&dev->close_list, &dev_list); in mtk_eth_set_dma_device()
4980 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
4983 list_del_init(&dev->close_list); in mtk_eth_set_dma_device()
4998 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
5012 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
5013 eth->soc->ana_rgc3, in mtk_sgmii_init()
5022 dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n"); in mtk_setup_legacy_sram()
5024 if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 > in mtk_setup_legacy_sram()
5025 res->end) in mtk_setup_legacy_sram()
5026 return -EINVAL; in mtk_setup_legacy_sram()
5028 eth->sram_pool = devm_gen_pool_create(eth->dev, in mtk_setup_legacy_sram()
5030 NUMA_NO_NODE, dev_name(eth->dev)); in mtk_setup_legacy_sram()
5032 if (IS_ERR(eth->sram_pool)) in mtk_setup_legacy_sram()
5033 return PTR_ERR(eth->sram_pool); in mtk_setup_legacy_sram()
5035 return gen_pool_add_virt(eth->sram_pool, in mtk_setup_legacy_sram()
5036 (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET, in mtk_setup_legacy_sram()
5037 res->start + MTK_ETH_SRAM_OFFSET, in mtk_setup_legacy_sram()
5048 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
5050 return -ENOMEM; in mtk_probe()
5052 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
5054 eth->dev = &pdev->dev; in mtk_probe()
5055 eth->dma_dev = &pdev->dev; in mtk_probe()
5056 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
5057 if (IS_ERR(eth->base)) in mtk_probe()
5058 return PTR_ERR(eth->base); in mtk_probe()
5060 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
5061 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
5063 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
5064 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)); in mtk_probe()
5066 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); in mtk_probe()
5069 dev_err(&pdev->dev, "Wrong DMA config\n"); in mtk_probe()
5070 return -EINVAL; in mtk_probe()
5074 spin_lock_init(ð->page_lock); in mtk_probe()
5075 spin_lock_init(ð->tx_irq_lock); in mtk_probe()
5076 spin_lock_init(ð->rx_irq_lock); in mtk_probe()
5077 spin_lock_init(ð->dim_lock); in mtk_probe()
5079 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
5080 INIT_WORK(ð->rx_dim.work, mtk_dim_rx); in mtk_probe()
5081 INIT_DELAYED_WORK(ð->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
5083 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
5084 INIT_WORK(ð->tx_dim.work, mtk_dim_tx); in mtk_probe()
5086 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
5087 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
5089 if (IS_ERR(eth->ethsys)) { in mtk_probe()
5090 dev_err(&pdev->dev, "no ethsys regmap found\n"); in mtk_probe()
5091 return PTR_ERR(eth->ethsys); in mtk_probe()
5095 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
5096 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
5098 if (IS_ERR(eth->infra)) { in mtk_probe()
5099 dev_err(&pdev->dev, "no infracfg regmap found\n"); in mtk_probe()
5100 return PTR_ERR(eth->infra); in mtk_probe()
5104 if (of_dma_is_coherent(pdev->dev.of_node)) { in mtk_probe()
5107 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
5108 "cci-control-port"); in mtk_probe()
5114 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
5121 if (eth->soc->required_pctl) { in mtk_probe()
5122 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
5124 if (IS_ERR(eth->pctl)) { in mtk_probe()
5125 dev_err(&pdev->dev, "no pctl regmap found\n"); in mtk_probe()
5126 err = PTR_ERR(eth->pctl); in mtk_probe()
5134 err = -EINVAL; in mtk_probe()
5138 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
5139 eth->sram_pool = of_gen_pool_get(pdev->dev.of_node, in mtk_probe()
5141 if (!eth->sram_pool) { in mtk_probe()
5147 dev_err(&pdev->dev, in mtk_probe()
5149 err = -EINVAL; in mtk_probe()
5156 if (eth->soc->offload_version) { in mtk_probe()
5162 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
5165 np = of_parse_phandle(pdev->dev.of_node, in mtk_probe()
5170 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
5171 wdma_phy = res ? res->start + wdma_base : 0; in mtk_probe()
5172 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
5181 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
5182 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
5184 if (IS_ERR(eth->clks[i])) { in mtk_probe()
5185 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
5186 err = -EPROBE_DEFER; in mtk_probe()
5189 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
5190 dev_err(&pdev->dev, "clock %s not found\n", in mtk_probe()
5192 err = -EINVAL; in mtk_probe()
5195 eth->clks[i] = NULL; in mtk_probe()
5199 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
5200 INIT_WORK(ð->pending_work, mtk_pending_work); in mtk_probe()
5206 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
5208 for_each_child_of_node(pdev->dev.of_node, mac_np) { in mtk_probe()
5210 "mediatek,eth-mac")) in mtk_probe()
5223 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
5224 err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED], in mtk_probe()
5226 dev_name(eth->dev), eth); in mtk_probe()
5228 err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX], in mtk_probe()
5230 dev_name(eth->dev), eth); in mtk_probe()
5234 err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX], in mtk_probe()
5236 dev_name(eth->dev), eth); in mtk_probe()
5242 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
5248 if (eth->soc->offload_version) { in mtk_probe()
5249 u8 ppe_num = eth->soc->ppe_num; in mtk_probe()
5251 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); in mtk_probe()
5253 u32 ppe_addr = eth->soc->reg_map->ppe_base; in mtk_probe()
5256 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
5258 if (!eth->ppe[i]) { in mtk_probe()
5259 err = -ENOMEM; in mtk_probe()
5270 if (!eth->netdev[i]) in mtk_probe()
5273 err = register_netdev(eth->netdev[i]); in mtk_probe()
5275 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
5278 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
5280 eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]); in mtk_probe()
5286 eth->dummy_dev = alloc_netdev_dummy(0); in mtk_probe()
5287 if (!eth->dummy_dev) { in mtk_probe()
5288 err = -ENOMEM; in mtk_probe()
5289 dev_err(eth->dev, "failed to allocate dummy device\n"); in mtk_probe()
5292 netif_napi_add(eth->dummy_dev, ð->tx_napi, mtk_napi_tx); in mtk_probe()
5293 netif_napi_add(eth->dummy_dev, ð->rx_napi, mtk_napi_rx); in mtk_probe()
5296 schedule_delayed_work(ð->reset.monitor_work, in mtk_probe()
5321 struct mtk_mac *mac; in mtk_remove() local
5326 if (!eth->netdev[i]) in mtk_remove()
5328 mtk_stop(eth->netdev[i]); in mtk_remove()
5329 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5330 phylink_disconnect_phy(mac->phylink); in mtk_remove()
5336 netif_napi_del(ð->tx_napi); in mtk_remove()
5337 netif_napi_del(ð->rx_napi); in mtk_remove()
5339 free_netdev(eth->dummy_dev); in mtk_remove()
5594 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5595 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5596 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5597 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5598 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5599 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5600 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5601 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5602 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },