Lines Matching "num-tx-queues"
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2014-2025 Broadcom
23 #include <linux/dma-mapping.h>
49 (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
51 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
56 /* Tx/Rx DMA register offset, skip 256 descriptors */
57 #define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
60 #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
63 #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
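The "skip 256 descriptors" note above is plain address arithmetic: the TDMA/RDMA register blocks sit past the 256-entry descriptor array, so both macros add TOTAL_DESC descriptors' worth of bytes to the DMA block base (their continuation lines are elided from this listing). A small sketch of the numbers, assuming the 3-words-per-descriptor layout used with 40-bit addressing (revisions without it use 2 words):

/* Sketch only: size of the descriptor region skipped before the DMA
 * control registers, assuming words_per_bd == 3 (40-bit addressing).
 */
enum {
	SK_TOTAL_DESC    = 256,
	SK_WORDS_PER_BD  = 3,
	SK_DMA_DESC_SIZE = SK_WORDS_PER_BD * 4,               /* 12 bytes per BD */
	SK_REG_SKIP      = SK_TOTAL_DESC * SK_DMA_DESC_SIZE,  /* 3072 = 0xC00 */
};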
72 * peripheral registers for CPU-native byte order. in bcmgenet_writel()
102 * the platform is explicitly configured for 64-bits/LPAE. in dmadesc_set_addr()
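A minimal sketch of the address write described above: the low 32 bits always go into the descriptor word, and the upper bits are only programmed when the platform can actually generate addresses above 4 GB. The high-word offset name below is a placeholder, not the driver's; assumes <linux/io.h> and kernel types.

/* Sketch, not driver code: split a DMA address across two descriptor words.
 * DESC_ADDR_HI_OFF is a placeholder offset for this illustration.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define DESC_ADDR_HI_OFF	0x08
#endif

static inline void sketch_desc_set_addr(void __iomem *bd, dma_addr_t addr)
{
	writel(lower_32_bits(addr), bd);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* High word only matters on 64-bit or LPAE configurations */
	writel(upper_32_bits(addr), bd + DESC_ADDR_HI_OFF);
#endif
}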
148 return bcmgenet_readl(priv->base + in bcmgenet_tbuf_ctrl_get()
149 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_get()
157 bcmgenet_writel(val, priv->base + in bcmgenet_tbuf_ctrl_set()
158 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_set()
166 return bcmgenet_readl(priv->base + in bcmgenet_bp_mc_get()
167 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_get()
175 bcmgenet_writel(val, priv->base + in bcmgenet_bp_mc_set()
176 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_set()
179 /* RX/TX DMA register accessors */
318 return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_readl()
325 bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_writel()
332 return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_readl()
339 bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_writel()
370 /* GENET v4 supports 40-bit pointer addressing

410 return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_readl()
419 bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_writel()
428 return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_readl()
437 bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_writel()
520 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4); in bcmgenet_hfb_set_filter_length()
535 size--; in bcmgenet_hfb_validate_mask()
538 return -EINVAL; in bcmgenet_hfb_validate_mask()
554 index = f_index * priv->hw_params->hfb_filter_size + offset / 2; in bcmgenet_hfb_insert_data()
557 while (size--) { in bcmgenet_hfb_insert_data()
601 struct ethtool_rx_flow_spec *fs = &rule->fs; in bcmgenet_hfb_create_rxnfc_filter()
608 f = fs->location + 1; in bcmgenet_hfb_create_rxnfc_filter()
609 if (fs->flow_type & FLOW_MAC_EXT) { in bcmgenet_hfb_create_rxnfc_filter()
611 &fs->h_ext.h_dest, &fs->m_ext.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
612 sizeof(fs->h_ext.h_dest)); in bcmgenet_hfb_create_rxnfc_filter()
615 if (fs->flow_type & FLOW_EXT) { in bcmgenet_hfb_create_rxnfc_filter()
616 if (fs->m_ext.vlan_etype || in bcmgenet_hfb_create_rxnfc_filter()
617 fs->m_ext.vlan_tci) { in bcmgenet_hfb_create_rxnfc_filter()
619 &fs->h_ext.vlan_etype, in bcmgenet_hfb_create_rxnfc_filter()
620 &fs->m_ext.vlan_etype, in bcmgenet_hfb_create_rxnfc_filter()
621 sizeof(fs->h_ext.vlan_etype)); in bcmgenet_hfb_create_rxnfc_filter()
623 &fs->h_ext.vlan_tci, in bcmgenet_hfb_create_rxnfc_filter()
624 &fs->m_ext.vlan_tci, in bcmgenet_hfb_create_rxnfc_filter()
625 sizeof(fs->h_ext.vlan_tci)); in bcmgenet_hfb_create_rxnfc_filter()
631 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in bcmgenet_hfb_create_rxnfc_filter()
635 &fs->h_u.ether_spec.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
636 &fs->m_u.ether_spec.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
637 sizeof(fs->h_u.ether_spec.h_dest)); in bcmgenet_hfb_create_rxnfc_filter()
639 &fs->h_u.ether_spec.h_source, in bcmgenet_hfb_create_rxnfc_filter()
640 &fs->m_u.ether_spec.h_source, in bcmgenet_hfb_create_rxnfc_filter()
641 sizeof(fs->h_u.ether_spec.h_source)); in bcmgenet_hfb_create_rxnfc_filter()
643 &fs->h_u.ether_spec.h_proto, in bcmgenet_hfb_create_rxnfc_filter()
644 &fs->m_u.ether_spec.h_proto, in bcmgenet_hfb_create_rxnfc_filter()
645 sizeof(fs->h_u.ether_spec.h_proto)); in bcmgenet_hfb_create_rxnfc_filter()
655 &fs->h_u.usr_ip4_spec.tos, in bcmgenet_hfb_create_rxnfc_filter()
656 &fs->m_u.usr_ip4_spec.tos, in bcmgenet_hfb_create_rxnfc_filter()
657 sizeof(fs->h_u.usr_ip4_spec.tos)); in bcmgenet_hfb_create_rxnfc_filter()
659 &fs->h_u.usr_ip4_spec.proto, in bcmgenet_hfb_create_rxnfc_filter()
660 &fs->m_u.usr_ip4_spec.proto, in bcmgenet_hfb_create_rxnfc_filter()
661 sizeof(fs->h_u.usr_ip4_spec.proto)); in bcmgenet_hfb_create_rxnfc_filter()
663 &fs->h_u.usr_ip4_spec.ip4src, in bcmgenet_hfb_create_rxnfc_filter()
664 &fs->m_u.usr_ip4_spec.ip4src, in bcmgenet_hfb_create_rxnfc_filter()
665 sizeof(fs->h_u.usr_ip4_spec.ip4src)); in bcmgenet_hfb_create_rxnfc_filter()
667 &fs->h_u.usr_ip4_spec.ip4dst, in bcmgenet_hfb_create_rxnfc_filter()
668 &fs->m_u.usr_ip4_spec.ip4dst, in bcmgenet_hfb_create_rxnfc_filter()
669 sizeof(fs->h_u.usr_ip4_spec.ip4dst)); in bcmgenet_hfb_create_rxnfc_filter()
670 if (!fs->m_u.usr_ip4_spec.l4_4_bytes) in bcmgenet_hfb_create_rxnfc_filter()
679 size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); in bcmgenet_hfb_create_rxnfc_filter()
682 &fs->h_u.usr_ip4_spec.l4_4_bytes, in bcmgenet_hfb_create_rxnfc_filter()
683 &fs->m_u.usr_ip4_spec.l4_4_bytes, in bcmgenet_hfb_create_rxnfc_filter()
690 if (fs->ring_cookie == RX_CLS_FLOW_WAKE) in bcmgenet_hfb_create_rxnfc_filter()
692 else if (fs->ring_cookie == RX_CLS_FLOW_DISC) in bcmgenet_hfb_create_rxnfc_filter()
693 q = priv->hw_params->rx_queues + 1; in bcmgenet_hfb_create_rxnfc_filter()
696 q = fs->ring_cookie; in bcmgenet_hfb_create_rxnfc_filter()
699 rule->state = BCMGENET_RXNFC_STATE_ENABLED; in bcmgenet_hfb_create_rxnfc_filter()
711 base = f_index * priv->hw_params->hfb_filter_size; in bcmgenet_hfb_clear_filter()
712 for (i = 0; i < priv->hw_params->hfb_filter_size; i++) in bcmgenet_hfb_clear_filter()
731 for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) in bcmgenet_hfb_clear()
743 INIT_LIST_HEAD(&priv->rxnfc_list); in bcmgenet_hfb_init()
745 INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); in bcmgenet_hfb_init()
746 priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_hfb_init()
757 return clk_prepare_enable(priv->clk); in bcmgenet_begin()
765 clk_disable_unprepare(priv->clk); in bcmgenet_complete()
772 return -EINVAL; in bcmgenet_get_link_ksettings()
774 if (!dev->phydev) in bcmgenet_get_link_ksettings()
775 return -ENODEV; in bcmgenet_get_link_ksettings()
777 phy_ethtool_ksettings_get(dev->phydev, cmd); in bcmgenet_get_link_ksettings()
786 return -EINVAL; in bcmgenet_set_link_ksettings()
788 if (!dev->phydev) in bcmgenet_set_link_ksettings()
789 return -ENODEV; in bcmgenet_set_link_ksettings()
791 return phy_ethtool_ksettings_set(dev->phydev, cmd); in bcmgenet_set_link_ksettings()
801 ret = clk_prepare_enable(priv->clk); in bcmgenet_set_features()
807 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); in bcmgenet_set_features()
809 clk_disable_unprepare(priv->clk); in bcmgenet_set_features()
818 return priv->msg_enable; in bcmgenet_get_msglevel()
825 priv->msg_enable = level; in bcmgenet_set_msglevel()
837 ec->tx_max_coalesced_frames = in bcmgenet_get_coalesce()
839 ec->rx_max_coalesced_frames = in bcmgenet_get_coalesce()
841 ec->rx_coalesce_usecs = in bcmgenet_get_coalesce()
844 for (i = 0; i <= priv->hw_params->rx_queues; i++) { in bcmgenet_get_coalesce()
845 ring = &priv->rx_rings[i]; in bcmgenet_get_coalesce()
846 ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; in bcmgenet_get_coalesce()
855 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_set_rx_coalesce()
856 unsigned int i = ring->index; in bcmgenet_set_rx_coalesce()
873 ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; in bcmgenet_set_ring_rx_coalesce()
874 ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; in bcmgenet_set_ring_rx_coalesce()
875 usecs = ring->rx_coalesce_usecs; in bcmgenet_set_ring_rx_coalesce()
876 pkts = ring->rx_max_coalesced_frames; in bcmgenet_set_ring_rx_coalesce()
878 if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { in bcmgenet_set_ring_rx_coalesce()
879 moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); in bcmgenet_set_ring_rx_coalesce()
884 ring->dim.use_dim = ec->use_adaptive_rx_coalesce; in bcmgenet_set_ring_rx_coalesce()
900 if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || in bcmgenet_set_coalesce()
901 ec->tx_max_coalesced_frames == 0 || in bcmgenet_set_coalesce()
902 ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || in bcmgenet_set_coalesce()
903 ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) in bcmgenet_set_coalesce()
904 return -EINVAL; in bcmgenet_set_coalesce()
906 if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) in bcmgenet_set_coalesce()
907 return -EINVAL; in bcmgenet_set_coalesce()
914 /* Program all TX queues with the same values, as there is no in bcmgenet_set_coalesce()
915 * ethtool knob to do coalescing on a per-queue basis in bcmgenet_set_coalesce()
917 for (i = 0; i <= priv->hw_params->tx_queues; i++) in bcmgenet_set_coalesce()
919 ec->tx_max_coalesced_frames, in bcmgenet_set_coalesce()
922 for (i = 0; i <= priv->hw_params->rx_queues; i++) in bcmgenet_set_coalesce()
923 bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); in bcmgenet_set_coalesce()
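The bound rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1 used in the validation above suggests the ring timeout is programmed in ticks of roughly 8 us. A hedged sketch of the microseconds-to-ticks conversion a per-ring helper would perform, assuming a tick of 8192 ns; names are placeholders, not the driver's:

/* Sketch only: convert a requested rx_coalesce_usecs value into timeout
 * register ticks, assuming one tick == 8192 ns. SK_TIMEOUT_MASK is a
 * placeholder for the real field width.
 */
#define SK_TIMEOUT_MASK		0xffff

static unsigned int sk_usecs_to_timeout_ticks(unsigned int usecs)
{
	unsigned int ticks = (usecs * 1000u + 8191u) / 8192u;	/* DIV_ROUND_UP */

	return ticks & SK_TIMEOUT_MASK;
}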
936 epause->autoneg = priv->autoneg_pause; in bcmgenet_get_pauseparam()
941 epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); in bcmgenet_get_pauseparam()
942 epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); in bcmgenet_get_pauseparam()
945 epause->tx_pause = priv->tx_pause; in bcmgenet_get_pauseparam()
946 epause->rx_pause = priv->rx_pause; in bcmgenet_get_pauseparam()
955 if (!dev->phydev) in bcmgenet_set_pauseparam()
956 return -ENODEV; in bcmgenet_set_pauseparam()
958 if (!phy_validate_pause(dev->phydev, epause)) in bcmgenet_set_pauseparam()
959 return -EINVAL; in bcmgenet_set_pauseparam()
961 priv->autoneg_pause = !!epause->autoneg; in bcmgenet_set_pauseparam()
962 priv->tx_pause = !!epause->tx_pause; in bcmgenet_set_pauseparam()
963 priv->rx_pause = !!epause->rx_pause; in bcmgenet_set_pauseparam()
965 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); in bcmgenet_set_pauseparam()
972 BCMGENET_STAT_RTNL = -1,
994 .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
1001 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1008 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
1021 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1027 #define STAT_GENET_Q(num) \
1028 STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
1029 tx_rings[num].stats64, packets), \
1030 STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
1031 tx_rings[num].stats64, bytes), \
1032 STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
1033 tx_rings[num].stats64, errors), \
1034 STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
1035 tx_rings[num].stats64, dropped), \
1036 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
1037 rx_rings[num].stats64, bytes), \
1038 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
1039 rx_rings[num].stats64, packets), \
1040 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
1041 rx_rings[num].stats64, errors), \
1042 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
1043 rx_rings[num].stats64, dropped), \
1044 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
1045 rx_rings[num].stats64, multicast), \
1046 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
1047 rx_rings[num].stats64, missed), \
1048 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
1049 rx_rings[num].stats64, length_errors), \
1050 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
1051 rx_rings[num].stats64, over_errors), \
1052 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
1053 rx_rings[num].stats64, crc_errors), \
1054 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
1055 rx_rings[num].stats64, frame_errors), \
1056 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
1057 rx_rings[num].stats64, fragmented_errors), \
1058 STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
1059 rx_rings[num].stats64, broadcast)
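For context, a per-queue macro like this is normally instantiated once per Tx/Rx queue when the ethtool stats table is built (the table itself is elided from this listing); an illustrative expansion, not a verbatim copy of the driver:

	/* Illustration only: per-queue entries in the stats table */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(4),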
1061 /* There is a 0xC gap between the end of RX and beginning of TX stats and then
1062 * between the end of TX stats and the beginning of the RX RUNT
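A hedged sketch of how such 0xC gaps are typically accounted for when walking the MIB block: a counter's byte offset is bumped once for the hole before the Tx stats and once more for the hole before the RUNT stats. The enum and constant names below are placeholders for illustration, not the driver's:

/* Sketch only: account for the 0xC-byte holes between MIB blocks. */
enum sk_mib_block { SK_MIB_RX, SK_MIB_TX, SK_MIB_RUNT };

#define SK_STAT_GAP	0xC

static unsigned int sk_mib_offset(enum sk_mib_block block, unsigned int off)
{
	switch (block) {
	case SK_MIB_RUNT:
		off += SK_STAT_GAP;	/* hole after the Tx block */
		/* fall through */
	case SK_MIB_TX:
		off += SK_STAT_GAP;	/* hole after the Rx block */
		/* fall through */
	case SK_MIB_RX:
		break;
	}
	return off;
}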
1116 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
1117 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
1118 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
1119 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
1120 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
1121 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
1122 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
1123 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
1124 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
1125 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
1126 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
1127 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
1128 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
1129 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
1130 STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
1131 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
1132 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
1133 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
1134 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
1135 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
1136 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
1137 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
1138 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
1139 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
1140 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
1141 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
1142 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
1143 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
1144 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
1162 /* Per TX queues */
1174 u64_stats_update_begin(&stats->syncp); \
1175 u64_stats_add(&stats->m, v); \
1176 u64_stats_update_end(&stats->syncp); \
1181 u64_stats_update_begin(&stats->syncp); \
1182 u64_stats_inc(&stats->m); \
1183 u64_stats_update_end(&stats->syncp); \
1189 strscpy(info->driver, "bcmgenet", sizeof(info->driver)); in bcmgenet_get_drvinfo()
1198 return -EOPNOTSUPP; in bcmgenet_get_sset_count()
1268 switch (s->type) { in bcmgenet_update_mib_counters()
1286 val = bcmgenet_umac_readl(priv, s->reg_offset); in bcmgenet_update_mib_counters()
1290 s->reg_offset); in bcmgenet_update_mib_counters()
1293 s->reg_offset); in bcmgenet_update_mib_counters()
1298 j += s->stat_sizeof; in bcmgenet_update_mib_counters()
1299 p = (char *)priv + s->stat_offset; in bcmgenet_update_mib_counters()
1326 if (s->type == BCMGENET_STAT_SOFT64) { in bcmgenet_get_ethtool_stats()
1327 syncp = (struct u64_stats_sync *)(p + s->syncp_offset); in bcmgenet_get_ethtool_stats()
1330 data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset)); in bcmgenet_get_ethtool_stats()
1333 if (s->type == BCMGENET_STAT_RTNL) in bcmgenet_get_ethtool_stats()
1336 p += s->stat_offset; in bcmgenet_get_ethtool_stats()
1338 s->stat_sizeof == sizeof(unsigned long)) in bcmgenet_get_ethtool_stats()
1350 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; in bcmgenet_eee_enable_set()
1353 if (enable && !priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
1354 clk_prepare_enable(priv->clk_eee); in bcmgenet_eee_enable_set()
1355 priv->clk_eee_enabled = true; in bcmgenet_eee_enable_set()
1366 reg = bcmgenet_readl(priv->base + off); in bcmgenet_eee_enable_set()
1371 bcmgenet_writel(reg, priv->base + off); in bcmgenet_eee_enable_set()
1381 if (!enable && priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
1382 clk_disable_unprepare(priv->clk_eee); in bcmgenet_eee_enable_set()
1383 priv->clk_eee_enabled = false; in bcmgenet_eee_enable_set()
1386 priv->eee.eee_enabled = enable; in bcmgenet_eee_enable_set()
1387 priv->eee.tx_lpi_enabled = tx_lpi_enabled; in bcmgenet_eee_enable_set()
1393 struct ethtool_keee *p = &priv->eee; in bcmgenet_get_eee()
1396 return -EOPNOTSUPP; in bcmgenet_get_eee()
1398 if (!dev->phydev) in bcmgenet_get_eee()
1399 return -ENODEV; in bcmgenet_get_eee()
1401 e->tx_lpi_enabled = p->tx_lpi_enabled; in bcmgenet_get_eee()
1402 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); in bcmgenet_get_eee()
1404 return phy_ethtool_get_eee(dev->phydev, e); in bcmgenet_get_eee()
1410 struct ethtool_keee *p = &priv->eee; in bcmgenet_set_eee()
1414 return -EOPNOTSUPP; in bcmgenet_set_eee()
1416 if (!dev->phydev) in bcmgenet_set_eee()
1417 return -ENODEV; in bcmgenet_set_eee()
1419 p->eee_enabled = e->eee_enabled; in bcmgenet_set_eee()
1421 if (!p->eee_enabled) { in bcmgenet_set_eee()
1424 active = phy_init_eee(dev->phydev, false) >= 0; in bcmgenet_set_eee()
1425 bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); in bcmgenet_set_eee()
1426 bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled); in bcmgenet_set_eee()
1429 return phy_ethtool_set_eee(dev->phydev, e); in bcmgenet_set_eee()
1438 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES && in bcmgenet_validate_flow()
1439 cmd->fs.location != RX_CLS_LOC_ANY) { in bcmgenet_validate_flow()
1441 cmd->fs.location); in bcmgenet_validate_flow()
1442 return -EINVAL; in bcmgenet_validate_flow()
1445 switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in bcmgenet_validate_flow()
1447 l4_mask = &cmd->fs.m_u.usr_ip4_spec; in bcmgenet_validate_flow()
1449 if (VALIDATE_MASK(l4_mask->ip4src) || in bcmgenet_validate_flow()
1450 VALIDATE_MASK(l4_mask->ip4dst) || in bcmgenet_validate_flow()
1451 VALIDATE_MASK(l4_mask->l4_4_bytes) || in bcmgenet_validate_flow()
1452 VALIDATE_MASK(l4_mask->proto) || in bcmgenet_validate_flow()
1453 VALIDATE_MASK(l4_mask->ip_ver) || in bcmgenet_validate_flow()
1454 VALIDATE_MASK(l4_mask->tos)) { in bcmgenet_validate_flow()
1456 return -EINVAL; in bcmgenet_validate_flow()
1460 eth_mask = &cmd->fs.m_u.ether_spec; in bcmgenet_validate_flow()
1462 if (VALIDATE_MASK(eth_mask->h_dest) || in bcmgenet_validate_flow()
1463 VALIDATE_MASK(eth_mask->h_source) || in bcmgenet_validate_flow()
1464 VALIDATE_MASK(eth_mask->h_proto)) { in bcmgenet_validate_flow()
1466 return -EINVAL; in bcmgenet_validate_flow()
1471 cmd->fs.flow_type); in bcmgenet_validate_flow()
1472 return -EINVAL; in bcmgenet_validate_flow()
1475 if ((cmd->fs.flow_type & FLOW_EXT)) { in bcmgenet_validate_flow()
1477 if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || in bcmgenet_validate_flow()
1478 VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { in bcmgenet_validate_flow()
1480 return -EINVAL; in bcmgenet_validate_flow()
1482 if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { in bcmgenet_validate_flow()
1483 netdev_err(dev, "rxnfc: user-def not supported\n"); in bcmgenet_validate_flow()
1484 return -EINVAL; in bcmgenet_validate_flow()
1488 if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { in bcmgenet_validate_flow()
1490 if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { in bcmgenet_validate_flow()
1492 return -EINVAL; in bcmgenet_validate_flow()
1506 if (priv->hw_params->hfb_filter_size < 128) { in bcmgenet_insert_flow()
1508 return -EINVAL; in bcmgenet_insert_flow()
1511 if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && in bcmgenet_insert_flow()
1512 cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE && in bcmgenet_insert_flow()
1513 cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) { in bcmgenet_insert_flow()
1515 cmd->fs.ring_cookie); in bcmgenet_insert_flow()
1516 return -EINVAL; in bcmgenet_insert_flow()
1523 if (cmd->fs.location == RX_CLS_LOC_ANY) { in bcmgenet_insert_flow()
1524 list_for_each_entry(loc_rule, &priv->rxnfc_list, list) { in bcmgenet_insert_flow()
1525 cmd->fs.location = loc_rule->fs.location; in bcmgenet_insert_flow()
1526 err = memcmp(&loc_rule->fs, &cmd->fs, in bcmgenet_insert_flow()
1533 loc_rule = &priv->rxnfc_rules[i]; in bcmgenet_insert_flow()
1534 if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_insert_flow()
1535 cmd->fs.location = i; in bcmgenet_insert_flow()
1540 cmd->fs.location = RX_CLS_LOC_ANY; in bcmgenet_insert_flow()
1541 return -ENOSPC; in bcmgenet_insert_flow()
1544 loc_rule = &priv->rxnfc_rules[cmd->fs.location]; in bcmgenet_insert_flow()
1546 if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_insert_flow()
1547 bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1); in bcmgenet_insert_flow()
1548 if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_insert_flow()
1549 list_del(&loc_rule->list); in bcmgenet_insert_flow()
1550 bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1); in bcmgenet_insert_flow()
1552 loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_insert_flow()
1553 memcpy(&loc_rule->fs, &cmd->fs, in bcmgenet_insert_flow()
1558 list_add_tail(&loc_rule->list, &priv->rxnfc_list); in bcmgenet_insert_flow()
1570 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) in bcmgenet_delete_flow()
1571 return -EINVAL; in bcmgenet_delete_flow()
1573 rule = &priv->rxnfc_rules[cmd->fs.location]; in bcmgenet_delete_flow()
1574 if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_delete_flow()
1575 err = -ENOENT; in bcmgenet_delete_flow()
1579 if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_delete_flow()
1580 bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1); in bcmgenet_delete_flow()
1581 if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_delete_flow()
1582 list_del(&rule->list); in bcmgenet_delete_flow()
1583 bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1); in bcmgenet_delete_flow()
1585 rule->state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_delete_flow()
1586 memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); in bcmgenet_delete_flow()
1597 switch (cmd->cmd) { in bcmgenet_set_rxnfc()
1605 netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n", in bcmgenet_set_rxnfc()
1606 cmd->cmd); in bcmgenet_set_rxnfc()
1607 return -EINVAL; in bcmgenet_set_rxnfc()
1621 return -EINVAL; in bcmgenet_get_flow()
1623 rule = &priv->rxnfc_rules[loc]; in bcmgenet_get_flow()
1624 if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) in bcmgenet_get_flow()
1625 err = -ENOENT; in bcmgenet_get_flow()
1627 memcpy(&cmd->fs, &rule->fs, in bcmgenet_get_flow()
1638 list_for_each(pos, &priv->rxnfc_list) in bcmgenet_get_num_flows()
1652 switch (cmd->cmd) { in bcmgenet_get_rxnfc()
1654 cmd->data = priv->hw_params->rx_queues ?: 1; in bcmgenet_get_rxnfc()
1657 cmd->rule_cnt = bcmgenet_get_num_flows(priv); in bcmgenet_get_rxnfc()
1658 cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL; in bcmgenet_get_rxnfc()
1661 err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); in bcmgenet_get_rxnfc()
1664 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_get_rxnfc()
1665 if (i < cmd->rule_cnt) in bcmgenet_get_rxnfc()
1666 rule_locs[i++] = rule->fs.location; in bcmgenet_get_rxnfc()
1667 cmd->rule_cnt = i; in bcmgenet_get_rxnfc()
1668 cmd->data = MAX_NUM_OF_FS_RULES; in bcmgenet_get_rxnfc()
1671 err = -EOPNOTSUPP; in bcmgenet_get_rxnfc()
1717 phy_detach(priv->dev->phydev); in bcmgenet_power_down()
1741 bcmgenet_phy_power_set(priv->dev, false); in bcmgenet_power_down()
1783 bcmgenet_phy_power_set(priv->dev, true); in bcmgenet_power_up()
1808 tx_cb_ptr = ring->cbs; in bcmgenet_get_txcb()
1809 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; in bcmgenet_get_txcb()
1812 if (ring->write_ptr == ring->end_ptr) in bcmgenet_get_txcb()
1813 ring->write_ptr = ring->cb_ptr; in bcmgenet_get_txcb()
1815 ring->write_ptr++; in bcmgenet_get_txcb()
1825 tx_cb_ptr = ring->cbs; in bcmgenet_put_txcb()
1826 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; in bcmgenet_put_txcb()
1829 if (ring->write_ptr == ring->cb_ptr) in bcmgenet_put_txcb()
1830 ring->write_ptr = ring->end_ptr; in bcmgenet_put_txcb()
1832 ring->write_ptr--; in bcmgenet_put_txcb()
1839 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_disable()
1840 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), in bcmgenet_rx_ring_int_disable()
1846 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_enable()
1847 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), in bcmgenet_rx_ring_int_enable()
1853 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_enable()
1859 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_disable()
1872 skb = cb->skb; in bcmgenet_free_tx_cb()
1875 cb->skb = NULL; in bcmgenet_free_tx_cb()
1876 if (cb == GENET_CB(skb)->first_cb) in bcmgenet_free_tx_cb()
1886 if (cb == GENET_CB(skb)->last_cb) in bcmgenet_free_tx_cb()
1906 skb = cb->skb; in bcmgenet_free_rx_cb()
1907 cb->skb = NULL; in bcmgenet_free_rx_cb()
1922 struct bcmgenet_tx_stats64 *stats = &ring->stats64; in __bcmgenet_tx_reclaim()
1932 bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR); in __bcmgenet_tx_reclaim()
1935 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) in __bcmgenet_tx_reclaim()
1937 txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; in __bcmgenet_tx_reclaim()
1941 __func__, ring->index, ring->c_index, c_index, txbds_ready); in __bcmgenet_tx_reclaim()
1945 skb = bcmgenet_free_tx_cb(&priv->pdev->dev, in __bcmgenet_tx_reclaim()
1946 &priv->tx_cbs[ring->clean_ptr]); in __bcmgenet_tx_reclaim()
1949 bytes_compl += GENET_CB(skb)->bytes_sent; in __bcmgenet_tx_reclaim()
1954 if (likely(ring->clean_ptr < ring->end_ptr)) in __bcmgenet_tx_reclaim()
1955 ring->clean_ptr++; in __bcmgenet_tx_reclaim()
1957 ring->clean_ptr = ring->cb_ptr; in __bcmgenet_tx_reclaim()
1960 ring->free_bds += txbds_processed; in __bcmgenet_tx_reclaim()
1961 ring->c_index = c_index; in __bcmgenet_tx_reclaim()
1963 u64_stats_update_begin(&stats->syncp); in __bcmgenet_tx_reclaim()
1964 u64_stats_add(&stats->packets, pkts_compl); in __bcmgenet_tx_reclaim()
1965 u64_stats_add(&stats->bytes, bytes_compl); in __bcmgenet_tx_reclaim()
1966 u64_stats_update_end(&stats->syncp); in __bcmgenet_tx_reclaim()
1968 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index), in __bcmgenet_tx_reclaim()
1979 struct device *kdev = &priv->pdev->dev; in bcmgenet_tx_reclaim()
1984 spin_lock_bh(&ring->lock); in bcmgenet_tx_reclaim()
1988 drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK; in bcmgenet_tx_reclaim()
1990 ring->prod_index = ring->c_index & DMA_C_INDEX_MASK; in bcmgenet_tx_reclaim()
1991 while (drop--) { in bcmgenet_tx_reclaim()
1993 skb = cb_ptr->skb; in bcmgenet_tx_reclaim()
1995 if (skb && cb_ptr == GENET_CB(skb)->first_cb) { in bcmgenet_tx_reclaim()
2002 bcmgenet_tdma_ring_writel(priv, ring->index, in bcmgenet_tx_reclaim()
2003 ring->prod_index, TDMA_PROD_INDEX); in bcmgenet_tx_reclaim()
2004 wr_ptr = ring->write_ptr * WORDS_PER_BD(priv); in bcmgenet_tx_reclaim()
2005 bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr, in bcmgenet_tx_reclaim()
2008 spin_unlock_bh(&ring->lock); in bcmgenet_tx_reclaim()
2020 spin_lock(&ring->lock); in bcmgenet_tx_poll()
2021 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); in bcmgenet_tx_poll()
2022 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { in bcmgenet_tx_poll()
2023 txq = netdev_get_tx_queue(ring->priv->dev, ring->index); in bcmgenet_tx_poll()
2026 spin_unlock(&ring->lock); in bcmgenet_tx_poll()
2044 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true); in bcmgenet_tx_reclaim_all()
2045 } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev)); in bcmgenet_tx_reclaim_all()
2055 struct bcmgenet_tx_stats64 *stats = &ring->stats64; in bcmgenet_add_tsb()
2071 priv->mib.tx_realloc_tsb_failed++; in bcmgenet_add_tsb()
2077 priv->mib.tx_realloc_tsb++; in bcmgenet_add_tsb()
2081 status = (struct status_64 *)skb->data; in bcmgenet_add_tsb()
2083 if (skb->ip_summed == CHECKSUM_PARTIAL) { in bcmgenet_add_tsb()
2084 ip_ver = skb->protocol; in bcmgenet_add_tsb()
2087 ip_proto = ip_hdr(skb)->protocol; in bcmgenet_add_tsb()
2090 ip_proto = ipv6_hdr(skb)->nexthdr; in bcmgenet_add_tsb()
2098 offset = skb_checksum_start_offset(skb) - sizeof(*status); in bcmgenet_add_tsb()
2100 (offset + skb->csum_offset) | in bcmgenet_add_tsb()
2107 status->tx_csum_info = tx_csum_info; in bcmgenet_add_tsb()
2121 struct device *kdev = &priv->pdev->dev; in bcmgenet_xmit()
2141 ring = &priv->tx_rings[index]; in bcmgenet_xmit()
2144 nr_frags = skb_shinfo(skb)->nr_frags; in bcmgenet_xmit()
2146 spin_lock(&ring->lock); in bcmgenet_xmit()
2147 if (ring->free_bds <= (nr_frags + 1)) { in bcmgenet_xmit()
2157 GENET_CB(skb)->bytes_sent = skb->len; in bcmgenet_xmit()
2173 GENET_CB(skb)->first_cb = tx_cb_ptr; in bcmgenet_xmit()
2175 mapping = dma_map_single(kdev, skb->data, size, in bcmgenet_xmit()
2179 frag = &skb_shinfo(skb)->frags[i - 1]; in bcmgenet_xmit()
2187 priv->mib.tx_dma_failed++; in bcmgenet_xmit()
2188 netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); in bcmgenet_xmit()
2195 tx_cb_ptr->skb = skb; in bcmgenet_xmit()
2198 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); in bcmgenet_xmit()
2207 if (skb->ip_summed == CHECKSUM_PARTIAL) in bcmgenet_xmit()
2213 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); in bcmgenet_xmit()
2216 GENET_CB(skb)->last_cb = tx_cb_ptr; in bcmgenet_xmit()
2222 ring->free_bds -= nr_frags + 1; in bcmgenet_xmit()
2223 ring->prod_index += nr_frags + 1; in bcmgenet_xmit()
2224 ring->prod_index &= DMA_P_INDEX_MASK; in bcmgenet_xmit()
2226 netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); in bcmgenet_xmit()
2228 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) in bcmgenet_xmit()
2233 bcmgenet_tdma_ring_writel(priv, ring->index, in bcmgenet_xmit()
2234 ring->prod_index, TDMA_PROD_INDEX); in bcmgenet_xmit()
2236 spin_unlock(&ring->lock); in bcmgenet_xmit()
2245 while (i-- > 0) { in bcmgenet_xmit()
2257 struct device *kdev = &priv->pdev->dev; in bcmgenet_rx_refill()
2263 skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, in bcmgenet_rx_refill()
2266 priv->mib.alloc_rx_buff_failed++; in bcmgenet_rx_refill()
2267 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
2272 /* DMA-map the new Rx skb */ in bcmgenet_rx_refill()
2273 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, in bcmgenet_rx_refill()
2276 priv->mib.rx_dma_failed++; in bcmgenet_rx_refill()
2278 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
2283 /* Grab the current Rx skb from the ring and DMA-unmap it */ in bcmgenet_rx_refill()
2287 cb->skb = skb; in bcmgenet_rx_refill()
2289 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); in bcmgenet_rx_refill()
2290 dmadesc_set_addr(priv, cb->bd_addr, mapping); in bcmgenet_rx_refill()
2296 /* bcmgenet_desc_rx - descriptor-based rx process.
2302 struct bcmgenet_rx_stats64 *stats = &ring->stats64; in bcmgenet_desc_rx()
2303 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_desc_rx()
2304 struct net_device *dev = priv->dev; in bcmgenet_desc_rx()
2316 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); in bcmgenet_desc_rx()
2319 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); in bcmgenet_desc_rx()
2323 if (discards > ring->old_discards) { in bcmgenet_desc_rx()
2324 discards = discards - ring->old_discards; in bcmgenet_desc_rx()
2326 ring->old_discards += discards; in bcmgenet_desc_rx()
2329 if (ring->old_discards >= 0xC000) { in bcmgenet_desc_rx()
2330 ring->old_discards = 0; in bcmgenet_desc_rx()
2331 bcmgenet_rdma_ring_writel(priv, ring->index, 0, in bcmgenet_desc_rx()
2337 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; in bcmgenet_desc_rx()
2347 cb = &priv->rx_cbs[ring->read_ptr]; in bcmgenet_desc_rx()
2355 status = (struct status_64 *)skb->data; in bcmgenet_desc_rx()
2356 dma_length_status = status->length_status; in bcmgenet_desc_rx()
2357 if (dev->features & NETIF_F_RXCSUM) { in bcmgenet_desc_rx()
2358 rx_csum = (__force __be16)(status->rx_csum & 0xffff); in bcmgenet_desc_rx()
2360 skb->csum = (__force __wsum)ntohs(rx_csum); in bcmgenet_desc_rx()
2361 skb->ip_summed = CHECKSUM_COMPLETE; in bcmgenet_desc_rx()
2373 __func__, p_index, ring->c_index, in bcmgenet_desc_rx()
2374 ring->read_ptr, dma_length_status); in bcmgenet_desc_rx()
2399 u64_stats_update_begin(&stats->syncp); in bcmgenet_desc_rx()
2401 u64_stats_inc(&stats->crc_errors); in bcmgenet_desc_rx()
2403 u64_stats_inc(&stats->over_errors); in bcmgenet_desc_rx()
2405 u64_stats_inc(&stats->frame_errors); in bcmgenet_desc_rx()
2407 u64_stats_inc(&stats->length_errors); in bcmgenet_desc_rx()
2413 u64_stats_inc(&stats->errors); in bcmgenet_desc_rx()
2414 u64_stats_update_end(&stats->syncp); in bcmgenet_desc_rx()
2423 len -= 66; in bcmgenet_desc_rx()
2425 if (priv->crc_fwd_en) { in bcmgenet_desc_rx()
2426 skb_trim(skb, len - ETH_FCS_LEN); in bcmgenet_desc_rx()
2427 len -= ETH_FCS_LEN; in bcmgenet_desc_rx()
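A short, hedged reading of the two length adjustments above, based on the 64-byte status_64 block seen earlier in the receive path:

/* len -= 66: drop the 64-byte status_64 block the hardware prepends plus the
 * 2 alignment bytes inserted ahead of the Ethernet header (hedged reading of
 * the constant). The additional ETH_FCS_LEN trim applies only when the MAC is
 * configured to forward the received CRC (priv->crc_fwd_en).
 */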
2433 skb->protocol = eth_type_trans(skb, priv->dev); in bcmgenet_desc_rx()
2435 u64_stats_update_begin(&stats->syncp); in bcmgenet_desc_rx()
2436 u64_stats_inc(&stats->packets); in bcmgenet_desc_rx()
2437 u64_stats_add(&stats->bytes, len); in bcmgenet_desc_rx()
2439 u64_stats_inc(&stats->multicast); in bcmgenet_desc_rx()
2441 u64_stats_inc(&stats->broadcast); in bcmgenet_desc_rx()
2442 u64_stats_update_end(&stats->syncp); in bcmgenet_desc_rx()
2445 napi_gro_receive(&ring->napi, skb); in bcmgenet_desc_rx()
2450 if (likely(ring->read_ptr < ring->end_ptr)) in bcmgenet_desc_rx()
2451 ring->read_ptr++; in bcmgenet_desc_rx()
2453 ring->read_ptr = ring->cb_ptr; in bcmgenet_desc_rx()
2455 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; in bcmgenet_desc_rx()
2456 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); in bcmgenet_desc_rx()
2459 ring->dim.bytes = bytes_processed; in bcmgenet_desc_rx()
2460 ring->dim.packets = rxpktprocessed; in bcmgenet_desc_rx()
2478 if (ring->dim.use_dim) { in bcmgenet_rx_poll()
2479 dim_update_sample(ring->dim.event_ctr, ring->dim.packets, in bcmgenet_rx_poll()
2480 ring->dim.bytes, &dim_sample); in bcmgenet_rx_poll()
2481 net_dim(&ring->dim.dim, &dim_sample); in bcmgenet_rx_poll()
2495 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in bcmgenet_dim_work()
2498 dim->state = DIM_START_MEASURE; in bcmgenet_dim_work()
2509 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_alloc_rx_buffers()
2512 for (i = 0; i < ring->size; i++) { in bcmgenet_alloc_rx_buffers()
2513 cb = ring->cbs + i; in bcmgenet_alloc_rx_buffers()
2517 if (!cb->skb) in bcmgenet_alloc_rx_buffers()
2518 return -ENOMEM; in bcmgenet_alloc_rx_buffers()
2530 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_free_rx_buffers()
2531 cb = &priv->rx_cbs[i]; in bcmgenet_free_rx_buffers()
2533 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); in bcmgenet_free_rx_buffers()
2543 spin_lock_bh(&priv->reg_lock); in umac_enable_set()
2546 spin_unlock_bh(&priv->reg_lock); in umac_enable_set()
2554 spin_unlock_bh(&priv->reg_lock); in umac_enable_set()
2556 /* UniMAC stops on a packet boundary, wait for a full-size packet in umac_enable_set()
2570 spin_lock_bh(&priv->reg_lock); in reset_umac()
2573 spin_unlock_bh(&priv->reg_lock); in reset_umac()
2592 if (priv->internal_phy) { in bcmgenet_link_intr_enable()
2596 } else if (priv->ext_phy) { in bcmgenet_link_intr_enable()
2598 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { in bcmgenet_link_intr_enable()
2607 struct device *kdev = &priv->pdev->dev; in init_umac()
2611 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); in init_umac()
2615 /* clear tx/rx counters */ in init_umac()
2623 /* init tx registers, enable TSB */ in init_umac()
2637 * a valid CHK bit to be set in the per-packet status word in init_umac()
2639 if (priv->crc_fwd_en) in init_umac()
2651 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { in init_umac()
2653 reg |= BIT(priv->hw_params->bp_in_en_shift); in init_umac()
2656 if (netif_is_multiqueue(priv->dev)) in init_umac()
2657 reg |= priv->hw_params->bp_in_mask; in init_umac()
2659 reg &= ~priv->hw_params->bp_in_mask; in init_umac()
2675 struct bcmgenet_net_dim *dim = &ring->dim; in bcmgenet_init_dim()
2677 INIT_WORK(&dim->dim.work, cb); in bcmgenet_init_dim()
2678 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in bcmgenet_init_dim()
2679 dim->event_ctr = 0; in bcmgenet_init_dim()
2680 dim->packets = 0; in bcmgenet_init_dim()
2681 dim->bytes = 0; in bcmgenet_init_dim()
2686 struct bcmgenet_net_dim *dim = &ring->dim; in bcmgenet_init_rx_coalesce()
2690 usecs = ring->rx_coalesce_usecs; in bcmgenet_init_rx_coalesce()
2691 pkts = ring->rx_max_coalesced_frames; in bcmgenet_init_rx_coalesce()
2693 /* If DIM was enabled, re-apply default parameters */ in bcmgenet_init_rx_coalesce()
2694 if (dim->use_dim) { in bcmgenet_init_rx_coalesce()
2695 moder = net_dim_get_def_rx_moderation(dim->dim.mode); in bcmgenet_init_rx_coalesce()
2703 /* Initialize a Tx ring along with corresponding hardware registers */
2708 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; in bcmgenet_init_tx_ring()
2712 spin_lock_init(&ring->lock); in bcmgenet_init_tx_ring()
2713 ring->priv = priv; in bcmgenet_init_tx_ring()
2714 ring->index = index; in bcmgenet_init_tx_ring()
2715 ring->cbs = priv->tx_cbs + start_ptr; in bcmgenet_init_tx_ring()
2716 ring->size = size; in bcmgenet_init_tx_ring()
2717 ring->clean_ptr = start_ptr; in bcmgenet_init_tx_ring()
2718 ring->c_index = 0; in bcmgenet_init_tx_ring()
2719 ring->free_bds = size; in bcmgenet_init_tx_ring()
2720 ring->write_ptr = start_ptr; in bcmgenet_init_tx_ring()
2721 ring->cb_ptr = start_ptr; in bcmgenet_init_tx_ring()
2722 ring->end_ptr = end_ptr - 1; in bcmgenet_init_tx_ring()
2723 ring->prod_index = 0; in bcmgenet_init_tx_ring()
2746 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_tx_ring()
2749 /* Initialize Tx NAPI */ in bcmgenet_init_tx_ring()
2750 netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); in bcmgenet_init_tx_ring()
2758 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; in bcmgenet_init_rx_ring()
2762 ring->priv = priv; in bcmgenet_init_rx_ring()
2763 ring->index = index; in bcmgenet_init_rx_ring()
2764 ring->cbs = priv->rx_cbs + start_ptr; in bcmgenet_init_rx_ring()
2765 ring->size = size; in bcmgenet_init_rx_ring()
2766 ring->c_index = 0; in bcmgenet_init_rx_ring()
2767 ring->read_ptr = start_ptr; in bcmgenet_init_rx_ring()
2768 ring->cb_ptr = start_ptr; in bcmgenet_init_rx_ring()
2769 ring->end_ptr = end_ptr - 1; in bcmgenet_init_rx_ring()
2779 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); in bcmgenet_init_rx_ring()
2798 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_rx_ring()
2809 for (i = 0; i <= priv->hw_params->tx_queues; ++i) { in bcmgenet_enable_tx_napi()
2810 ring = &priv->tx_rings[i]; in bcmgenet_enable_tx_napi()
2811 napi_enable(&ring->napi); in bcmgenet_enable_tx_napi()
2821 for (i = 0; i <= priv->hw_params->tx_queues; ++i) { in bcmgenet_disable_tx_napi()
2822 ring = &priv->tx_rings[i]; in bcmgenet_disable_tx_napi()
2823 napi_disable(&ring->napi); in bcmgenet_disable_tx_napi()
2832 for (i = 0; i <= priv->hw_params->tx_queues; ++i) { in bcmgenet_fini_tx_napi()
2833 ring = &priv->tx_rings[i]; in bcmgenet_fini_tx_napi()
2834 netif_napi_del(&ring->napi); in bcmgenet_fini_tx_napi()
2844 mask = (1 << (priv->hw_params->tx_queues + 1)) - 1; in bcmgenet_tdma_disable()
2858 return -ETIMEDOUT; in bcmgenet_tdma_disable()
2867 mask = (1 << (priv->hw_params->rx_queues + 1)) - 1; in bcmgenet_rdma_disable()
2881 return -ETIMEDOUT; in bcmgenet_rdma_disable()
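The ring masks built in both helpers above cover the default queue plus every priority queue; for example, with the four Tx priority queues described in the comment further below:

/* e.g. with tx_queues == 4: (1 << (4 + 1)) - 1 == 0x1f, i.e. rings 0-4 */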
2884 /* Initialize Tx queues
2886 * Queues 1-4 are priority-based, each one has 32 descriptors,
2889 * Queue 0 is the default Tx queue with
2890 * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2893 * - Tx queue 0 uses tx_cbs[0..127]
2894 * - Tx queue 1 uses tx_cbs[128..159]
2895 * - Tx queue 2 uses tx_cbs[160..191]
2896 * - Tx queue 3 uses tx_cbs[192..223]
2897 * - Tx queue 4 uses tx_cbs[224..255]
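The descriptor split described above can be reproduced with simple arithmetic: each priority queue takes tx_bds_per_q descriptors and the default queue 0 gets whatever remains of TOTAL_DESC. A standalone illustration using the constants quoted in the comment (256 descriptors total, 32 per priority queue); this is a sketch, not driver code:

/* Illustrative sketch: compute the control-block range used by each Tx queue. */
#include <stdio.h>

#define TOTAL_DESC	256
#define TX_QUEUES	4	/* priority queues 1..4 */
#define TX_BDS_PER_Q	32

int main(void)
{
	unsigned int q0_cnt = TOTAL_DESC - TX_QUEUES * TX_BDS_PER_Q;	/* 128 */
	unsigned int start = 0, end = q0_cnt, q;

	for (q = 0; q <= TX_QUEUES; q++) {
		printf("Tx queue %u uses tx_cbs[%u..%u]\n", q, start, end - 1);
		start = end;
		end += TX_BDS_PER_Q;
	}
	return 0;
}

Running it prints the same ranges listed in the comment (queue 0: 0..127, queue 1: 128..159, and so on).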
2908 /* Initialize Tx priority queues */ in bcmgenet_init_tx_queues()
2909 for (i = 0; i <= priv->hw_params->tx_queues; i++) { in bcmgenet_init_tx_queues()
2910 bcmgenet_init_tx_ring(priv, i, end - start, start, end); in bcmgenet_init_tx_queues()
2912 end += priv->hw_params->tx_bds_per_q; in bcmgenet_init_tx_queues()
2918 /* Set Tx queue priorities */ in bcmgenet_init_tx_queues()
2923 /* Configure Tx queues as descriptor rings */ in bcmgenet_init_tx_queues()
2924 ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1; in bcmgenet_init_tx_queues()
2927 /* Enable Tx rings */ in bcmgenet_init_tx_queues()
2937 for (i = 0; i <= priv->hw_params->rx_queues; ++i) { in bcmgenet_enable_rx_napi()
2938 ring = &priv->rx_rings[i]; in bcmgenet_enable_rx_napi()
2939 napi_enable(&ring->napi); in bcmgenet_enable_rx_napi()
2949 for (i = 0; i <= priv->hw_params->rx_queues; ++i) { in bcmgenet_disable_rx_napi()
2950 ring = &priv->rx_rings[i]; in bcmgenet_disable_rx_napi()
2951 napi_disable(&ring->napi); in bcmgenet_disable_rx_napi()
2952 cancel_work_sync(&ring->dim.dim.work); in bcmgenet_disable_rx_napi()
2961 for (i = 0; i <= priv->hw_params->rx_queues; ++i) { in bcmgenet_fini_rx_napi()
2962 ring = &priv->rx_rings[i]; in bcmgenet_fini_rx_napi()
2963 netif_napi_del(&ring->napi); in bcmgenet_fini_rx_napi()
2967 /* Initialize Rx queues
2969 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2970 * used to direct traffic to these queues.
2981 /* Initialize Rx priority queues */ in bcmgenet_init_rx_queues()
2982 for (i = 0; i <= priv->hw_params->rx_queues; i++) { in bcmgenet_init_rx_queues()
2983 ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end); in bcmgenet_init_rx_queues()
2988 end += priv->hw_params->rx_bds_per_q; in bcmgenet_init_rx_queues()
2991 /* Configure Rx queues as descriptor rings */ in bcmgenet_init_rx_queues()
2992 ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1; in bcmgenet_init_rx_queues()
3006 /* Disable TDMA to stop adding more frames to TX DMA */ in bcmgenet_dma_teardown()
3007 if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) { in bcmgenet_dma_teardown()
3008 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); in bcmgenet_dma_teardown()
3009 ret = -ETIMEDOUT; in bcmgenet_dma_teardown()
3012 /* Wait 10ms for packet drain in both tx and rx dma */ in bcmgenet_dma_teardown()
3016 if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) { in bcmgenet_dma_teardown()
3017 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); in bcmgenet_dma_teardown()
3018 ret = -ETIMEDOUT; in bcmgenet_dma_teardown()
3032 for (i = 0; i <= priv->hw_params->tx_queues; i++) { in bcmgenet_fini_dma()
3033 txq = netdev_get_tx_queue(priv->dev, i); in bcmgenet_fini_dma()
3038 kfree(priv->rx_cbs); in bcmgenet_fini_dma()
3039 kfree(priv->tx_cbs); in bcmgenet_fini_dma()
3050 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_init_dma()
3052 /* Disable TX DMA */ in bcmgenet_init_dma()
3055 netdev_err(priv->dev, "failed to halt Tx DMA\n"); in bcmgenet_init_dma()
3062 netdev_err(priv->dev, "failed to halt Rx DMA\n"); in bcmgenet_init_dma()
3066 /* Flush TX queues */ in bcmgenet_init_dma()
3080 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; in bcmgenet_init_dma()
3081 priv->num_rx_bds = TOTAL_DESC; in bcmgenet_init_dma()
3082 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
3084 if (!priv->rx_cbs) in bcmgenet_init_dma()
3085 return -ENOMEM; in bcmgenet_init_dma()
3087 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_init_dma()
3088 cb = priv->rx_cbs + i; in bcmgenet_init_dma()
3089 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
3092 /* Initialize common TX ring structures */ in bcmgenet_init_dma()
3093 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; in bcmgenet_init_dma()
3094 priv->num_tx_bds = TOTAL_DESC; in bcmgenet_init_dma()
3095 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
3097 if (!priv->tx_cbs) { in bcmgenet_init_dma()
3098 kfree(priv->rx_cbs); in bcmgenet_init_dma()
3099 return -ENOMEM; in bcmgenet_init_dma()
3102 for (i = 0; i < priv->num_tx_bds; i++) { in bcmgenet_init_dma()
3103 cb = priv->tx_cbs + i; in bcmgenet_init_dma()
3104 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
3108 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, in bcmgenet_init_dma()
3111 /* Initialize Rx queues */ in bcmgenet_init_dma()
3112 ret = bcmgenet_init_rx_queues(priv->dev); in bcmgenet_init_dma()
3114 netdev_err(priv->dev, "failed to initialize Rx queues\n"); in bcmgenet_init_dma()
3116 kfree(priv->rx_cbs); in bcmgenet_init_dma()
3117 kfree(priv->tx_cbs); in bcmgenet_init_dma()
3122 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, in bcmgenet_init_dma()
3125 /* Initialize Tx queues */ in bcmgenet_init_dma()
3126 bcmgenet_init_tx_queues(priv->dev); in bcmgenet_init_dma()
3128 /* Enable RX/TX DMA */ in bcmgenet_init_dma()
3147 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); in bcmgenet_irq_task()
3149 spin_lock_irq(&priv->lock); in bcmgenet_irq_task()
3150 status = priv->irq0_stat; in bcmgenet_irq_task()
3151 priv->irq0_stat = 0; in bcmgenet_irq_task()
3152 spin_unlock_irq(&priv->lock); in bcmgenet_irq_task()
3155 priv->dev->phydev->autoneg != AUTONEG_ENABLE) { in bcmgenet_irq_task()
3156 phy_init_hw(priv->dev->phydev); in bcmgenet_irq_task()
3157 genphy_config_aneg(priv->dev->phydev); in bcmgenet_irq_task()
3162 phy_mac_interrupt(priv->dev->phydev); in bcmgenet_irq_task()
3166 /* bcmgenet_isr1: handle Rx and Tx queues */
3181 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr1()
3185 for (index = 0; index <= priv->hw_params->rx_queues; index++) { in bcmgenet_isr1()
3189 rx_ring = &priv->rx_rings[index]; in bcmgenet_isr1()
3190 rx_ring->dim.event_ctr++; in bcmgenet_isr1()
3192 if (likely(napi_schedule_prep(&rx_ring->napi))) { in bcmgenet_isr1()
3194 __napi_schedule_irqoff(&rx_ring->napi); in bcmgenet_isr1()
3198 /* Check Tx priority queue interrupts */ in bcmgenet_isr1()
3199 for (index = 0; index <= priv->hw_params->tx_queues; index++) { in bcmgenet_isr1()
3203 tx_ring = &priv->tx_rings[index]; in bcmgenet_isr1()
3205 if (likely(napi_schedule_prep(&tx_ring->napi))) { in bcmgenet_isr1()
3207 __napi_schedule_irqoff(&tx_ring->napi); in bcmgenet_isr1()
3228 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr0()
3232 wake_up(&priv->wq); in bcmgenet_isr0()
3237 /* Save irq status for bottom-half processing. */ in bcmgenet_isr0()
3238 spin_lock_irqsave(&priv->lock, flags); in bcmgenet_isr0()
3239 priv->irq0_stat |= status; in bcmgenet_isr0()
3240 spin_unlock_irqrestore(&priv->lock, flags); in bcmgenet_isr0()
3242 schedule_work(&priv->bcmgenet_irq_work); in bcmgenet_isr0()
3303 phy_start(dev->phydev); in bcmgenet_netif_start()
3314 clk_prepare_enable(priv->clk); in bcmgenet_open()
3319 if (priv->internal_phy) in bcmgenet_open()
3330 bcmgenet_set_features(dev, dev->features); in bcmgenet_open()
3332 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_open()
3344 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, in bcmgenet_open()
3345 dev->name, priv); in bcmgenet_open()
3347 netdev_err(dev, "can't request IRQ %d\n", priv->irq0); in bcmgenet_open()
3351 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, in bcmgenet_open()
3352 dev->name, priv); in bcmgenet_open()
3354 netdev_err(dev, "can't request IRQ %d\n", priv->irq1); in bcmgenet_open()
3364 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); in bcmgenet_open()
3373 free_irq(priv->irq1, priv); in bcmgenet_open()
3375 free_irq(priv->irq0, priv); in bcmgenet_open()
3380 if (priv->internal_phy) in bcmgenet_open()
3382 clk_disable_unprepare(priv->clk); in bcmgenet_open()
3397 phy_stop(dev->phydev); in bcmgenet_netif_stop()
3401 /* Disable MAC transmit. TX DMA disabled must be done before this */ in bcmgenet_netif_stop()
3411 cancel_work_sync(&priv->bcmgenet_irq_work); in bcmgenet_netif_stop()
3413 /* tx reclaim */ in bcmgenet_netif_stop()
3428 phy_disconnect(dev->phydev); in bcmgenet_close()
3430 free_irq(priv->irq0, priv); in bcmgenet_close()
3431 free_irq(priv->irq1, priv); in bcmgenet_close()
3433 if (priv->internal_phy) in bcmgenet_close()
3436 clk_disable_unprepare(priv->clk); in bcmgenet_close()
3443 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_dump_tx_queue()
3452 txq = netdev_get_tx_queue(priv->dev, ring->index); in bcmgenet_dump_tx_queue()
3454 spin_lock(&ring->lock); in bcmgenet_dump_tx_queue()
3456 intmsk = 1 << ring->index; in bcmgenet_dump_tx_queue()
3457 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); in bcmgenet_dump_tx_queue()
3458 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); in bcmgenet_dump_tx_queue()
3460 free_bds = ring->free_bds; in bcmgenet_dump_tx_queue()
3461 spin_unlock(&ring->lock); in bcmgenet_dump_tx_queue()
3463 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" in bcmgenet_dump_tx_queue()
3464 "TX queue status: %s, interrupts: %s\n" in bcmgenet_dump_tx_queue()
3470 ring->index, ring->index, in bcmgenet_dump_tx_queue()
3473 free_bds, ring->size, in bcmgenet_dump_tx_queue()
3474 ring->prod_index, p_index & DMA_P_INDEX_MASK, in bcmgenet_dump_tx_queue()
3475 ring->c_index, c_index & DMA_C_INDEX_MASK, in bcmgenet_dump_tx_queue()
3476 ring->clean_ptr, ring->write_ptr, in bcmgenet_dump_tx_queue()
3477 ring->cb_ptr, ring->end_ptr); in bcmgenet_dump_tx_queue()
3488 for (q = 0; q <= priv->hw_params->tx_queues; q++) in bcmgenet_timeout()
3489 bcmgenet_dump_tx_queue(&priv->tx_rings[q]); in bcmgenet_timeout()
3493 for (q = 0; q <= priv->hw_params->tx_queues; q++) in bcmgenet_timeout()
3496 /* Re-enable TX interrupts if disabled */ in bcmgenet_timeout()
3501 BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors); in bcmgenet_timeout()
3527 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); in bcmgenet_set_rx_mode()
3539 spin_lock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3541 if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || in bcmgenet_set_rx_mode()
3545 spin_unlock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3551 spin_unlock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3557 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); in bcmgenet_set_rx_mode()
3559 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); in bcmgenet_set_rx_mode()
3563 bcmgenet_set_mdf_addr(priv, ha->addr, &i); in bcmgenet_set_rx_mode()
3567 bcmgenet_set_mdf_addr(priv, ha->addr, &i); in bcmgenet_set_rx_mode()
3570 reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); in bcmgenet_set_rx_mode()
3580 * without disabling the UniMAC RX/TX enable bits. in bcmgenet_set_mac_addr()
3583 return -EBUSY; in bcmgenet_set_mac_addr()
3585 eth_hw_addr_set(dev, addr->sa_data); in bcmgenet_set_mac_addr()
3607 for (q = 0; q <= priv->hw_params->tx_queues; q++) { in bcmgenet_get_stats64()
3608 tx_stats = &priv->tx_rings[q].stats64; in bcmgenet_get_stats64()
3610 start = u64_stats_fetch_begin(&tx_stats->syncp); in bcmgenet_get_stats64()
3611 tx_bytes = u64_stats_read(&tx_stats->bytes); in bcmgenet_get_stats64()
3612 tx_packets = u64_stats_read(&tx_stats->packets); in bcmgenet_get_stats64()
3613 tx_errors = u64_stats_read(&tx_stats->errors); in bcmgenet_get_stats64()
3614 tx_dropped = u64_stats_read(&tx_stats->dropped); in bcmgenet_get_stats64()
3615 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); in bcmgenet_get_stats64()
3617 stats->tx_bytes += tx_bytes; in bcmgenet_get_stats64()
3618 stats->tx_packets += tx_packets; in bcmgenet_get_stats64()
3619 stats->tx_errors += tx_errors; in bcmgenet_get_stats64()
3620 stats->tx_dropped += tx_dropped; in bcmgenet_get_stats64()
3623 for (q = 0; q <= priv->hw_params->rx_queues; q++) { in bcmgenet_get_stats64()
3624 rx_stats = &priv->rx_rings[q].stats64; in bcmgenet_get_stats64()
3626 start = u64_stats_fetch_begin(&rx_stats->syncp); in bcmgenet_get_stats64()
3627 rx_bytes = u64_stats_read(&rx_stats->bytes); in bcmgenet_get_stats64()
3628 rx_packets = u64_stats_read(&rx_stats->packets); in bcmgenet_get_stats64()
3629 rx_errors = u64_stats_read(&rx_stats->errors); in bcmgenet_get_stats64()
3630 rx_dropped = u64_stats_read(&rx_stats->dropped); in bcmgenet_get_stats64()
3631 rx_missed = u64_stats_read(&rx_stats->missed); in bcmgenet_get_stats64()
3632 rx_length_errors = u64_stats_read(&rx_stats->length_errors); in bcmgenet_get_stats64()
3633 rx_over_errors = u64_stats_read(&rx_stats->over_errors); in bcmgenet_get_stats64()
3634 rx_crc_errors = u64_stats_read(&rx_stats->crc_errors); in bcmgenet_get_stats64()
3635 rx_frame_errors = u64_stats_read(&rx_stats->frame_errors); in bcmgenet_get_stats64()
3636 rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors); in bcmgenet_get_stats64()
3637 multicast = u64_stats_read(&rx_stats->multicast); in bcmgenet_get_stats64()
3638 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); in bcmgenet_get_stats64()
3645 stats->rx_bytes += rx_bytes; in bcmgenet_get_stats64()
3646 stats->rx_packets += rx_packets; in bcmgenet_get_stats64()
3647 stats->rx_errors += rx_errors; in bcmgenet_get_stats64()
3648 stats->rx_dropped += rx_dropped; in bcmgenet_get_stats64()
3649 stats->rx_missed_errors += rx_missed; in bcmgenet_get_stats64()
3650 stats->rx_length_errors += rx_length_errors; in bcmgenet_get_stats64()
3651 stats->rx_over_errors += rx_over_errors; in bcmgenet_get_stats64()
3652 stats->rx_crc_errors += rx_crc_errors; in bcmgenet_get_stats64()
3653 stats->rx_frame_errors += rx_frame_errors; in bcmgenet_get_stats64()
3654 stats->multicast += multicast; in bcmgenet_get_stats64()
3662 if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || in bcmgenet_change_carrier()
3663 priv->phy_interface != PHY_INTERFACE_MODE_MOCA) in bcmgenet_change_carrier()
3664 return -EOPNOTSUPP; in bcmgenet_change_carrier()
3784 priv->hw_params = params; in bcmgenet_set_hw_params()
3795 if (major != priv->version) { in bcmgenet_set_hw_params()
3796 dev_err(&priv->pdev->dev, in bcmgenet_set_hw_params()
3798 major, priv->version); in bcmgenet_set_hw_params()
3802 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, in bcmgenet_set_hw_params()
3814 * heuristic to check for the new GPHY revision and re-arrange things in bcmgenet_set_hw_params()
3833 priv->gphy_rev = gphy_rev << 8; in bcmgenet_set_hw_params()
3836 priv->gphy_rev = gphy_rev; in bcmgenet_set_hw_params()
3841 pr_warn("GENET does not support 40-bits PA\n"); in bcmgenet_set_hw_params()
3851 priv->version, in bcmgenet_set_hw_params()
3852 params->tx_queues, params->tx_bds_per_q, in bcmgenet_set_hw_params()
3853 params->rx_queues, params->rx_bds_per_q, in bcmgenet_set_hw_params()
3854 params->bp_in_en_shift, params->bp_in_mask, in bcmgenet_set_hw_params()
3855 params->hfb_filter_cnt, params->qtag_mask, in bcmgenet_set_hw_params()
3856 params->tbuf_offset, params->hfb_offset, in bcmgenet_set_hw_params()
3857 params->hfb_reg_offset, in bcmgenet_set_hw_params()
3858 params->rdma_offset, params->tdma_offset, in bcmgenet_set_hw_params()
3859 params->words_per_bd); in bcmgenet_set_hw_params()
3916 { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3917 { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3918 { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3919 { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3920 { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3921 { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3922 { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
3929 struct bcmgenet_platform_data *pd = pdev->dev.platform_data; in bcmgenet_probe()
3934 int err = -EIO; in bcmgenet_probe()
3936 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ in bcmgenet_probe()
3940 dev_err(&pdev->dev, "can't allocate net device\n"); in bcmgenet_probe()
3941 return -ENOMEM; in bcmgenet_probe()
3945 priv->irq0 = platform_get_irq(pdev, 0); in bcmgenet_probe()
3946 if (priv->irq0 < 0) { in bcmgenet_probe()
3947 err = priv->irq0; in bcmgenet_probe()
3950 priv->irq1 = platform_get_irq(pdev, 1); in bcmgenet_probe()
3951 if (priv->irq1 < 0) { in bcmgenet_probe()
3952 err = priv->irq1; in bcmgenet_probe()
3955 priv->wol_irq = platform_get_irq_optional(pdev, 2); in bcmgenet_probe()
3956 if (priv->wol_irq == -EPROBE_DEFER) { in bcmgenet_probe()
3957 err = priv->wol_irq; in bcmgenet_probe()
3961 priv->base = devm_platform_ioremap_resource(pdev, 0); in bcmgenet_probe()
3962 if (IS_ERR(priv->base)) { in bcmgenet_probe()
3963 err = PTR_ERR(priv->base); in bcmgenet_probe()
3967 spin_lock_init(&priv->reg_lock); in bcmgenet_probe()
3968 spin_lock_init(&priv->lock); in bcmgenet_probe()
3971 priv->autoneg_pause = 1; in bcmgenet_probe()
3972 priv->tx_pause = 1; in bcmgenet_probe()
3973 priv->rx_pause = 1; in bcmgenet_probe()
3975 SET_NETDEV_DEV(dev, &pdev->dev); in bcmgenet_probe()
3976 dev_set_drvdata(&pdev->dev, dev); in bcmgenet_probe()
3977 dev->watchdog_timeo = 2 * HZ; in bcmgenet_probe()
3978 dev->ethtool_ops = &bcmgenet_ethtool_ops; in bcmgenet_probe()
3979 dev->netdev_ops = &bcmgenet_netdev_ops; in bcmgenet_probe()
3981 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); in bcmgenet_probe()
3984 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | in bcmgenet_probe()
3986 dev->hw_features |= dev->features; in bcmgenet_probe()
3987 dev->vlan_features |= dev->features; in bcmgenet_probe()
3992 priv->wol_irq_disabled = true; in bcmgenet_probe()
3993 if (priv->wol_irq > 0) { in bcmgenet_probe()
3994 err = devm_request_irq(&pdev->dev, priv->wol_irq, in bcmgenet_probe()
3995 bcmgenet_wol_isr, 0, dev->name, priv); in bcmgenet_probe()
3997 device_set_wakeup_capable(&pdev->dev, 1); in bcmgenet_probe()
4003 dev->needed_headroom += 64; in bcmgenet_probe()
4005 priv->dev = dev; in bcmgenet_probe()
4006 priv->pdev = pdev; in bcmgenet_probe()
4008 pdata = device_get_match_data(&pdev->dev); in bcmgenet_probe()
4010 priv->version = pdata->version; in bcmgenet_probe()
4011 priv->dma_max_burst_length = pdata->dma_max_burst_length; in bcmgenet_probe()
4012 priv->flags = pdata->flags; in bcmgenet_probe()
4014 priv->version = pd->genet_version; in bcmgenet_probe()
4015 priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; in bcmgenet_probe()
4018 priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); in bcmgenet_probe()
4019 if (IS_ERR(priv->clk)) { in bcmgenet_probe()
4020 dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); in bcmgenet_probe()
4021 err = PTR_ERR(priv->clk); in bcmgenet_probe()
4025 err = clk_prepare_enable(priv->clk); in bcmgenet_probe()
4031 err = -EIO; in bcmgenet_probe()
4033 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); in bcmgenet_probe()
4035 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in bcmgenet_probe()
4040 init_waitqueue_head(&priv->wq); in bcmgenet_probe()
4042 priv->rx_buf_len = RX_BUF_LENGTH; in bcmgenet_probe()
4043 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); in bcmgenet_probe()
4045 priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); in bcmgenet_probe()
4046 if (IS_ERR(priv->clk_wol)) { in bcmgenet_probe()
4047 dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); in bcmgenet_probe()
4048 err = PTR_ERR(priv->clk_wol); in bcmgenet_probe()
4052 priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); in bcmgenet_probe()
4053 if (IS_ERR(priv->clk_eee)) { in bcmgenet_probe()
4054 dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); in bcmgenet_probe()
4055 err = PTR_ERR(priv->clk_eee); in bcmgenet_probe()
4062 if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) in bcmgenet_probe()
4065 if (pd && !IS_ERR_OR_NULL(pd->mac_address)) in bcmgenet_probe()
4066 eth_hw_addr_set(dev, pd->mac_address); in bcmgenet_probe()
4068 if (device_get_ethdev_address(&pdev->dev, dev)) in bcmgenet_probe()
4069 if (has_acpi_companion(&pdev->dev)) { in bcmgenet_probe()
4076 if (!is_valid_ether_addr(dev->dev_addr)) { in bcmgenet_probe()
4077 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); in bcmgenet_probe()
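The probe fragments above fall back through several MAC address sources. A minimal standalone sketch of that order (platform data, then DT/ACPI device property, then a random locally administered address); it assumes <linux/etherdevice.h> and <linux/property.h> and is not a verbatim copy of the driver's logic:

/* Sketch only: MAC selection order as seen in the probe path above. */
static void sk_pick_mac(struct device *d, struct net_device *dev,
			const u8 *pd_mac)
{
	if (pd_mac && is_valid_ether_addr(pd_mac))
		eth_hw_addr_set(dev, pd_mac);		/* platform data */
	else
		device_get_ethdev_address(d, dev);	/* DT/ACPI property */

	if (!is_valid_ether_addr(dev->dev_addr)) {
		dev_warn(d, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);		/* last resort */
	}
}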
4087 /* setup number of real queues + 1 */ in bcmgenet_probe()
4088 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); in bcmgenet_probe()
4089 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); in bcmgenet_probe()
4092 for (i = 0; i <= priv->hw_params->rx_queues; i++) in bcmgenet_probe()
4093 priv->rx_rings[i].rx_max_coalesced_frames = 1; in bcmgenet_probe()
4096 for (i = 0; i <= priv->hw_params->rx_queues; i++) in bcmgenet_probe()
4097 u64_stats_init(&priv->rx_rings[i].stats64.syncp); in bcmgenet_probe()
4098 for (i = 0; i <= priv->hw_params->tx_queues; i++) in bcmgenet_probe()
4099 u64_stats_init(&priv->tx_rings[i].stats64.syncp); in bcmgenet_probe()
4105 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
4116 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
4124 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); in bcmgenet_remove()
4126 dev_set_drvdata(&pdev->dev, NULL); in bcmgenet_remove()
4127 unregister_netdev(priv->dev); in bcmgenet_remove()
4128 bcmgenet_mii_exit(priv->dev); in bcmgenet_remove()
4129 free_netdev(priv->dev); in bcmgenet_remove()
4149 ret = clk_prepare_enable(priv->clk); in bcmgenet_resume_noirq()
4153 if (device_may_wakeup(d) && priv->wolopts) { in bcmgenet_resume_noirq()
4154 /* Account for Wake-on-LAN events and clear those events in bcmgenet_resume_noirq()
4162 pm_wakeup_event(&priv->pdev->dev, 0); in bcmgenet_resume_noirq()
4164 /* From WOL-enabled suspend, switch to regular clock */ in bcmgenet_resume_noirq()
4174 if (priv->internal_phy) in bcmgenet_resume_noirq()
4194 if (device_may_wakeup(d) && priv->wolopts) { in bcmgenet_resume()
4198 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_resume()
4199 if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_resume()
4201 rule->fs.location + 1); in bcmgenet_resume()
4206 /* Reinitialize Tx flows */ in bcmgenet_resume()
4208 bcmgenet_init_tx_queues(priv->dev); in bcmgenet_resume()
4215 phy_start_machine(dev->phydev); in bcmgenet_resume()
4218 enable_irq(priv->irq1); in bcmgenet_resume()
4226 enable_irq(priv->irq1); in bcmgenet_resume()
4231 phy_init_hw(dev->phydev); in bcmgenet_resume()
4234 genphy_config_aneg(dev->phydev); in bcmgenet_resume()
4235 bcmgenet_mii_config(priv->dev, false); in bcmgenet_resume()
4238 bcmgenet_set_features(dev, dev->features); in bcmgenet_resume()
4240 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_resume()
4244 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_resume()
4245 if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) in bcmgenet_resume()
4256 phy_resume(dev->phydev); in bcmgenet_resume()
4265 if (priv->internal_phy) in bcmgenet_resume()
4267 clk_disable_unprepare(priv->clk); in bcmgenet_resume()
4283 if (device_may_wakeup(d) && priv->wolopts) { in bcmgenet_suspend()
4286 /* Suspend non-wake Rx data flows */ in bcmgenet_suspend()
4287 if (priv->wolopts & WAKE_FILTER) in bcmgenet_suspend()
4288 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_suspend()
4289 if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE && in bcmgenet_suspend()
4290 rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_suspend()
4291 hfb_enable |= 1 << rule->fs.location; in bcmgenet_suspend()
4308 if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) in bcmgenet_suspend()
4309 netdev_warn(priv->dev, in bcmgenet_suspend()
4310 "Timed out while disabling TX DMA\n"); in bcmgenet_suspend()
4314 disable_irq(priv->irq1); in bcmgenet_suspend()
4334 /* Prepare the device for Wake-on-LAN and switch to the slow clock */ in bcmgenet_suspend_noirq()
4335 if (device_may_wakeup(d) && priv->wolopts) in bcmgenet_suspend_noirq()
4337 else if (priv->internal_phy) in bcmgenet_suspend_noirq()
4345 clk_disable_unprepare(priv->clk); in bcmgenet_suspend_noirq()
4386 MODULE_SOFTDEP("pre: mdio-bcm-unimac");