1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2017-2019 NXP */ 3 4 #include <linux/ethtool_netlink.h> 5 #include <linux/net_tstamp.h> 6 #include <linux/module.h> 7 #include "enetc.h" 8 9 static const u32 enetc_si_regs[] = { 10 ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR, 11 ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR, 12 ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1, 13 ENETC_SIUEFDCR 14 }; 15 16 static const u32 enetc_txbdr_regs[] = { 17 ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, 18 ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0, 19 ENETC_TBICR1 20 }; 21 22 static const u32 enetc_rxbdr_regs[] = { 23 ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, 24 ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0, 25 ENETC_RBICR1 26 }; 27 28 static const u32 enetc_port_regs[] = { 29 ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0), 30 ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1, 31 ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0), 32 ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE 33 }; 34 35 static const u32 enetc_port_mm_regs[] = { 36 ENETC_MMCSR, ENETC_PFPMR, ENETC_PTCFPR(0), ENETC_PTCFPR(1), 37 ENETC_PTCFPR(2), ENETC_PTCFPR(3), ENETC_PTCFPR(4), ENETC_PTCFPR(5), 38 ENETC_PTCFPR(6), ENETC_PTCFPR(7), 39 }; 40 41 static int enetc_get_reglen(struct net_device *ndev) 42 { 43 struct enetc_ndev_priv *priv = netdev_priv(ndev); 44 struct enetc_hw *hw = &priv->si->hw; 45 int len; 46 47 len = ARRAY_SIZE(enetc_si_regs); 48 len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings; 49 len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings; 50 51 if (hw->port) 52 len += ARRAY_SIZE(enetc_port_regs); 53 54 if (hw->port && !!(priv->si->hw_features & ENETC_SI_F_QBU)) 55 len += ARRAY_SIZE(enetc_port_mm_regs); 56 57 len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */ 58 59 return len; 60 } 61 62 static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs, 63 void *regbuf) 64 { 65 struct enetc_ndev_priv *priv = netdev_priv(ndev); 66 struct enetc_hw *hw = &priv->si->hw; 67 u32 *buf = (u32 *)regbuf; 68 int i, j; 69 u32 addr; 70 71 for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) { 72 *buf++ = enetc_si_regs[i]; 73 *buf++ = enetc_rd(hw, enetc_si_regs[i]); 74 } 75 76 for (i = 0; i < priv->num_tx_rings; i++) { 77 for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) { 78 addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]); 79 80 *buf++ = addr; 81 *buf++ = enetc_rd(hw, addr); 82 } 83 } 84 85 for (i = 0; i < priv->num_rx_rings; i++) { 86 for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) { 87 addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]); 88 89 *buf++ = addr; 90 *buf++ = enetc_rd(hw, addr); 91 } 92 } 93 94 if (!hw->port) 95 return; 96 97 for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) { 98 addr = ENETC_PORT_BASE + enetc_port_regs[i]; 99 *buf++ = addr; 100 *buf++ = enetc_rd(hw, addr); 101 } 102 103 if (priv->si->hw_features & ENETC_SI_F_QBU) { 104 for (i = 0; i < ARRAY_SIZE(enetc_port_mm_regs); i++) { 105 addr = ENETC_PORT_BASE + enetc_port_mm_regs[i]; 106 *buf++ = addr; 107 *buf++ = enetc_rd(hw, addr); 108 } 109 } 110 } 111 112 static const struct { 113 int reg; 114 char name[ETH_GSTRING_LEN]; 115 } enetc_si_counters[] = { 116 { ENETC_SIROCT, "SI rx octets" }, 117 { ENETC_SIRFRM, "SI rx frames" }, 118 { ENETC_SIRUCA, "SI rx u-cast frames" }, 119 { ENETC_SIRMCA, "SI rx m-cast frames" }, 120 { ENETC_SITOCT, "SI tx octets" }, 121 { ENETC_SITFRM, "SI tx frames" }, 122 { ENETC_SITUCA, "SI tx u-cast frames" }, 123 { ENETC_SITMCA, "SI tx m-cast frames" }, 124 { ENETC_RBDCR(0), "Rx ring 0 discarded frames" }, 125 { ENETC_RBDCR(1), "Rx ring 1 discarded frames" }, 126 { ENETC_RBDCR(2), "Rx ring 2 discarded frames" }, 127 { ENETC_RBDCR(3), "Rx ring 3 discarded frames" }, 128 { ENETC_RBDCR(4), "Rx ring 4 discarded frames" }, 129 { ENETC_RBDCR(5), "Rx ring 5 discarded frames" }, 130 { ENETC_RBDCR(6), "Rx ring 6 discarded frames" }, 131 { ENETC_RBDCR(7), "Rx ring 7 discarded frames" }, 132 { ENETC_RBDCR(8), "Rx ring 8 discarded frames" }, 133 { ENETC_RBDCR(9), "Rx ring 9 discarded frames" }, 134 { ENETC_RBDCR(10), "Rx ring 10 discarded frames" }, 135 { ENETC_RBDCR(11), "Rx ring 11 discarded frames" }, 136 { ENETC_RBDCR(12), "Rx ring 12 discarded frames" }, 137 { ENETC_RBDCR(13), "Rx ring 13 discarded frames" }, 138 { ENETC_RBDCR(14), "Rx ring 14 discarded frames" }, 139 { ENETC_RBDCR(15), "Rx ring 15 discarded frames" }, 140 }; 141 142 static const struct { 143 int reg; 144 char name[ETH_GSTRING_LEN]; 145 } enetc_port_counters[] = { 146 { ENETC_PM_REOCT(0), "MAC rx ethernet octets" }, 147 { ENETC_PM_RALN(0), "MAC rx alignment errors" }, 148 { ENETC_PM_RXPF(0), "MAC rx valid pause frames" }, 149 { ENETC_PM_RFRM(0), "MAC rx valid frames" }, 150 { ENETC_PM_RFCS(0), "MAC rx fcs errors" }, 151 { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" }, 152 { ENETC_PM_RERR(0), "MAC rx frame errors" }, 153 { ENETC_PM_RUCA(0), "MAC rx unicast frames" }, 154 { ENETC_PM_RMCA(0), "MAC rx multicast frames" }, 155 { ENETC_PM_RBCA(0), "MAC rx broadcast frames" }, 156 { ENETC_PM_RDRP(0), "MAC rx dropped packets" }, 157 { ENETC_PM_RPKT(0), "MAC rx packets" }, 158 { ENETC_PM_RUND(0), "MAC rx undersized packets" }, 159 { ENETC_PM_R64(0), "MAC rx 64 byte packets" }, 160 { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" }, 161 { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" }, 162 { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" }, 163 { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" }, 164 { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" }, 165 { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" }, 166 { ENETC_PM_ROVR(0), "MAC rx oversized packets" }, 167 { ENETC_PM_RJBR(0), "MAC rx jabber packets" }, 168 { ENETC_PM_RFRG(0), "MAC rx fragment packets" }, 169 { ENETC_PM_RCNP(0), "MAC rx control packets" }, 170 { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" }, 171 { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" }, 172 { ENETC_PM_TOCT(0), "MAC tx octets" }, 173 { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" }, 174 { ENETC_PM_TXPF(0), "MAC tx valid pause frames" }, 175 { ENETC_PM_TFRM(0), "MAC tx frames" }, 176 { ENETC_PM_TFCS(0), "MAC tx fcs errors" }, 177 { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" }, 178 { ENETC_PM_TERR(0), "MAC tx frame errors" }, 179 { ENETC_PM_TUCA(0), "MAC tx unicast frames" }, 180 { ENETC_PM_TMCA(0), "MAC tx multicast frames" }, 181 { ENETC_PM_TBCA(0), "MAC tx broadcast frames" }, 182 { ENETC_PM_TPKT(0), "MAC tx packets" }, 183 { ENETC_PM_TUND(0), "MAC tx undersized packets" }, 184 { ENETC_PM_T64(0), "MAC tx 64 byte packets" }, 185 { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" }, 186 { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" }, 187 { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" }, 188 { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" }, 189 { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" }, 190 { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" }, 191 { ENETC_PM_TCNP(0), "MAC tx control packets" }, 192 { ENETC_PM_TDFR(0), "MAC tx deferred packets" }, 193 { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" }, 194 { ENETC_PM_TSCOL(0), "MAC tx single collisions" }, 195 { ENETC_PM_TLCOL(0), "MAC tx late collisions" }, 196 { ENETC_PM_TECOL(0), "MAC tx excessive collisions" }, 197 { ENETC_UFDMF, "SI MAC nomatch u-cast discards" }, 198 { ENETC_MFDMF, "SI MAC nomatch m-cast discards" }, 199 { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" }, 200 { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" }, 201 { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" }, 202 { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" }, 203 { ENETC_PFDMSAPR, "SI pruning discarded frames" }, 204 { ENETC_PICDR(0), "ICM DR0 discarded frames" }, 205 { ENETC_PICDR(1), "ICM DR1 discarded frames" }, 206 { ENETC_PICDR(2), "ICM DR2 discarded frames" }, 207 { ENETC_PICDR(3), "ICM DR3 discarded frames" }, 208 }; 209 210 static const char rx_ring_stats[][ETH_GSTRING_LEN] = { 211 "Rx ring %2d frames", 212 "Rx ring %2d alloc errors", 213 "Rx ring %2d XDP drops", 214 "Rx ring %2d recycles", 215 "Rx ring %2d recycle failures", 216 "Rx ring %2d redirects", 217 "Rx ring %2d redirect failures", 218 }; 219 220 static const char tx_ring_stats[][ETH_GSTRING_LEN] = { 221 "Tx ring %2d frames", 222 "Tx ring %2d XDP frames", 223 "Tx ring %2d XDP drops", 224 "Tx window drop %2d frames", 225 }; 226 227 static int enetc_get_sset_count(struct net_device *ndev, int sset) 228 { 229 struct enetc_ndev_priv *priv = netdev_priv(ndev); 230 int len; 231 232 if (sset != ETH_SS_STATS) 233 return -EOPNOTSUPP; 234 235 len = ARRAY_SIZE(enetc_si_counters) + 236 ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings + 237 ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings; 238 239 if (!enetc_si_is_pf(priv->si)) 240 return len; 241 242 len += ARRAY_SIZE(enetc_port_counters); 243 244 return len; 245 } 246 247 static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 248 { 249 struct enetc_ndev_priv *priv = netdev_priv(ndev); 250 u8 *p = data; 251 int i, j; 252 253 switch (stringset) { 254 case ETH_SS_STATS: 255 for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { 256 strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); 257 p += ETH_GSTRING_LEN; 258 } 259 for (i = 0; i < priv->num_tx_rings; i++) { 260 for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) { 261 snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], 262 i); 263 p += ETH_GSTRING_LEN; 264 } 265 } 266 for (i = 0; i < priv->num_rx_rings; i++) { 267 for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { 268 snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], 269 i); 270 p += ETH_GSTRING_LEN; 271 } 272 } 273 274 if (!enetc_si_is_pf(priv->si)) 275 break; 276 277 for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { 278 strscpy(p, enetc_port_counters[i].name, 279 ETH_GSTRING_LEN); 280 p += ETH_GSTRING_LEN; 281 } 282 break; 283 } 284 } 285 286 static void enetc_get_ethtool_stats(struct net_device *ndev, 287 struct ethtool_stats *stats, u64 *data) 288 { 289 struct enetc_ndev_priv *priv = netdev_priv(ndev); 290 struct enetc_hw *hw = &priv->si->hw; 291 int i, o = 0; 292 293 for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) 294 data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); 295 296 for (i = 0; i < priv->num_tx_rings; i++) { 297 data[o++] = priv->tx_ring[i]->stats.packets; 298 data[o++] = priv->tx_ring[i]->stats.xdp_tx; 299 data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops; 300 data[o++] = priv->tx_ring[i]->stats.win_drop; 301 } 302 303 for (i = 0; i < priv->num_rx_rings; i++) { 304 data[o++] = priv->rx_ring[i]->stats.packets; 305 data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; 306 data[o++] = priv->rx_ring[i]->stats.xdp_drops; 307 data[o++] = priv->rx_ring[i]->stats.recycles; 308 data[o++] = priv->rx_ring[i]->stats.recycle_failures; 309 data[o++] = priv->rx_ring[i]->stats.xdp_redirect; 310 data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures; 311 } 312 313 if (!enetc_si_is_pf(priv->si)) 314 return; 315 316 for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) 317 data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg); 318 } 319 320 static void enetc_pause_stats(struct enetc_hw *hw, int mac, 321 struct ethtool_pause_stats *pause_stats) 322 { 323 pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac)); 324 pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac)); 325 } 326 327 static void enetc_get_pause_stats(struct net_device *ndev, 328 struct ethtool_pause_stats *pause_stats) 329 { 330 struct enetc_ndev_priv *priv = netdev_priv(ndev); 331 struct enetc_hw *hw = &priv->si->hw; 332 struct enetc_si *si = priv->si; 333 334 switch (pause_stats->src) { 335 case ETHTOOL_MAC_STATS_SRC_EMAC: 336 enetc_pause_stats(hw, 0, pause_stats); 337 break; 338 case ETHTOOL_MAC_STATS_SRC_PMAC: 339 if (si->hw_features & ENETC_SI_F_QBU) 340 enetc_pause_stats(hw, 1, pause_stats); 341 break; 342 case ETHTOOL_MAC_STATS_SRC_AGGREGATE: 343 ethtool_aggregate_pause_stats(ndev, pause_stats); 344 break; 345 } 346 } 347 348 static void enetc_mac_stats(struct enetc_hw *hw, int mac, 349 struct ethtool_eth_mac_stats *s) 350 { 351 s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac)); 352 s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac)); 353 s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac)); 354 s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac)); 355 s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac)); 356 s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac)); 357 s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac)); 358 s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac)); 359 s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac)); 360 s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac)); 361 s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac)); 362 s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac)); 363 s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac)); 364 s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac)); 365 s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac)); 366 s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac)); 367 s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac)); 368 s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac)); 369 } 370 371 static void enetc_ctrl_stats(struct enetc_hw *hw, int mac, 372 struct ethtool_eth_ctrl_stats *s) 373 { 374 s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac)); 375 s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac)); 376 } 377 378 static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = { 379 { 64, 64 }, 380 { 65, 127 }, 381 { 128, 255 }, 382 { 256, 511 }, 383 { 512, 1023 }, 384 { 1024, 1522 }, 385 { 1523, ENETC_MAC_MAXFRM_SIZE }, 386 {}, 387 }; 388 389 static void enetc_rmon_stats(struct enetc_hw *hw, int mac, 390 struct ethtool_rmon_stats *s) 391 { 392 s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac)); 393 s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac)); 394 s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac)); 395 s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac)); 396 397 s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac)); 398 s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac)); 399 s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac)); 400 s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac)); 401 s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac)); 402 s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac)); 403 s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac)); 404 405 s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac)); 406 s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac)); 407 s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac)); 408 s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac)); 409 s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac)); 410 s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac)); 411 s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac)); 412 } 413 414 static void enetc_get_eth_mac_stats(struct net_device *ndev, 415 struct ethtool_eth_mac_stats *mac_stats) 416 { 417 struct enetc_ndev_priv *priv = netdev_priv(ndev); 418 struct enetc_hw *hw = &priv->si->hw; 419 struct enetc_si *si = priv->si; 420 421 switch (mac_stats->src) { 422 case ETHTOOL_MAC_STATS_SRC_EMAC: 423 enetc_mac_stats(hw, 0, mac_stats); 424 break; 425 case ETHTOOL_MAC_STATS_SRC_PMAC: 426 if (si->hw_features & ENETC_SI_F_QBU) 427 enetc_mac_stats(hw, 1, mac_stats); 428 break; 429 case ETHTOOL_MAC_STATS_SRC_AGGREGATE: 430 ethtool_aggregate_mac_stats(ndev, mac_stats); 431 break; 432 } 433 } 434 435 static void enetc_get_eth_ctrl_stats(struct net_device *ndev, 436 struct ethtool_eth_ctrl_stats *ctrl_stats) 437 { 438 struct enetc_ndev_priv *priv = netdev_priv(ndev); 439 struct enetc_hw *hw = &priv->si->hw; 440 struct enetc_si *si = priv->si; 441 442 switch (ctrl_stats->src) { 443 case ETHTOOL_MAC_STATS_SRC_EMAC: 444 enetc_ctrl_stats(hw, 0, ctrl_stats); 445 break; 446 case ETHTOOL_MAC_STATS_SRC_PMAC: 447 if (si->hw_features & ENETC_SI_F_QBU) 448 enetc_ctrl_stats(hw, 1, ctrl_stats); 449 break; 450 case ETHTOOL_MAC_STATS_SRC_AGGREGATE: 451 ethtool_aggregate_ctrl_stats(ndev, ctrl_stats); 452 break; 453 } 454 } 455 456 static void enetc_get_rmon_stats(struct net_device *ndev, 457 struct ethtool_rmon_stats *rmon_stats, 458 const struct ethtool_rmon_hist_range **ranges) 459 { 460 struct enetc_ndev_priv *priv = netdev_priv(ndev); 461 struct enetc_hw *hw = &priv->si->hw; 462 struct enetc_si *si = priv->si; 463 464 *ranges = enetc_rmon_ranges; 465 466 switch (rmon_stats->src) { 467 case ETHTOOL_MAC_STATS_SRC_EMAC: 468 enetc_rmon_stats(hw, 0, rmon_stats); 469 break; 470 case ETHTOOL_MAC_STATS_SRC_PMAC: 471 if (si->hw_features & ENETC_SI_F_QBU) 472 enetc_rmon_stats(hw, 1, rmon_stats); 473 break; 474 case ETHTOOL_MAC_STATS_SRC_AGGREGATE: 475 ethtool_aggregate_rmon_stats(ndev, rmon_stats); 476 break; 477 } 478 } 479 480 #define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ 481 RXH_IP_DST) 482 #define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) 483 static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) 484 { 485 static const u32 rsshash[] = { 486 [TCP_V4_FLOW] = ENETC_RSSHASH_L4, 487 [UDP_V4_FLOW] = ENETC_RSSHASH_L4, 488 [SCTP_V4_FLOW] = ENETC_RSSHASH_L4, 489 [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3, 490 [IPV4_FLOW] = ENETC_RSSHASH_L3, 491 [TCP_V6_FLOW] = ENETC_RSSHASH_L4, 492 [UDP_V6_FLOW] = ENETC_RSSHASH_L4, 493 [SCTP_V6_FLOW] = ENETC_RSSHASH_L4, 494 [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3, 495 [IPV6_FLOW] = ENETC_RSSHASH_L3, 496 [ETHER_FLOW] = 0, 497 }; 498 499 if (rxnfc->flow_type >= ARRAY_SIZE(rsshash)) 500 return -EINVAL; 501 502 rxnfc->data = rsshash[rxnfc->flow_type]; 503 504 return 0; 505 } 506 507 /* current HW spec does byte reversal on everything including MAC addresses */ 508 static void ether_addr_copy_swap(u8 *dst, const u8 *src) 509 { 510 int i; 511 512 for (i = 0; i < ETH_ALEN; i++) 513 dst[i] = src[ETH_ALEN - i - 1]; 514 } 515 516 static int enetc_set_cls_entry(struct enetc_si *si, 517 struct ethtool_rx_flow_spec *fs, bool en) 518 { 519 struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; 520 struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m; 521 struct ethhdr *eth_h, *eth_m; 522 struct enetc_cmd_rfse rfse = { {0} }; 523 524 if (!en) 525 goto done; 526 527 switch (fs->flow_type & 0xff) { 528 case TCP_V4_FLOW: 529 l4ip4_h = &fs->h_u.tcp_ip4_spec; 530 l4ip4_m = &fs->m_u.tcp_ip4_spec; 531 goto l4ip4; 532 case UDP_V4_FLOW: 533 l4ip4_h = &fs->h_u.udp_ip4_spec; 534 l4ip4_m = &fs->m_u.udp_ip4_spec; 535 goto l4ip4; 536 case SCTP_V4_FLOW: 537 l4ip4_h = &fs->h_u.sctp_ip4_spec; 538 l4ip4_m = &fs->m_u.sctp_ip4_spec; 539 l4ip4: 540 rfse.sip_h[0] = l4ip4_h->ip4src; 541 rfse.sip_m[0] = l4ip4_m->ip4src; 542 rfse.dip_h[0] = l4ip4_h->ip4dst; 543 rfse.dip_m[0] = l4ip4_m->ip4dst; 544 rfse.sport_h = ntohs(l4ip4_h->psrc); 545 rfse.sport_m = ntohs(l4ip4_m->psrc); 546 rfse.dport_h = ntohs(l4ip4_h->pdst); 547 rfse.dport_m = ntohs(l4ip4_m->pdst); 548 if (l4ip4_m->tos) 549 netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); 550 rfse.ethtype_h = ETH_P_IP; /* IPv4 */ 551 rfse.ethtype_m = 0xffff; 552 break; 553 case IP_USER_FLOW: 554 l3ip4_h = &fs->h_u.usr_ip4_spec; 555 l3ip4_m = &fs->m_u.usr_ip4_spec; 556 557 rfse.sip_h[0] = l3ip4_h->ip4src; 558 rfse.sip_m[0] = l3ip4_m->ip4src; 559 rfse.dip_h[0] = l3ip4_h->ip4dst; 560 rfse.dip_m[0] = l3ip4_m->ip4dst; 561 if (l3ip4_m->tos) 562 netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); 563 rfse.ethtype_h = ETH_P_IP; /* IPv4 */ 564 rfse.ethtype_m = 0xffff; 565 break; 566 case ETHER_FLOW: 567 eth_h = &fs->h_u.ether_spec; 568 eth_m = &fs->m_u.ether_spec; 569 570 ether_addr_copy_swap(rfse.smac_h, eth_h->h_source); 571 ether_addr_copy_swap(rfse.smac_m, eth_m->h_source); 572 ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest); 573 ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest); 574 rfse.ethtype_h = ntohs(eth_h->h_proto); 575 rfse.ethtype_m = ntohs(eth_m->h_proto); 576 break; 577 default: 578 return -EOPNOTSUPP; 579 } 580 581 rfse.mode |= ENETC_RFSE_EN; 582 if (fs->ring_cookie != RX_CLS_FLOW_DISC) { 583 rfse.mode |= ENETC_RFSE_MODE_BD; 584 rfse.result = fs->ring_cookie; 585 } 586 done: 587 return enetc_set_fs_entry(si, &rfse, fs->location); 588 } 589 590 static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, 591 u32 *rule_locs) 592 { 593 struct enetc_ndev_priv *priv = netdev_priv(ndev); 594 int i, j; 595 596 switch (rxnfc->cmd) { 597 case ETHTOOL_GRXRINGS: 598 rxnfc->data = priv->num_rx_rings; 599 break; 600 case ETHTOOL_GRXFH: 601 /* get RSS hash config */ 602 return enetc_get_rsshash(rxnfc); 603 case ETHTOOL_GRXCLSRLCNT: 604 /* total number of entries */ 605 rxnfc->data = priv->si->num_fs_entries; 606 /* number of entries in use */ 607 rxnfc->rule_cnt = 0; 608 for (i = 0; i < priv->si->num_fs_entries; i++) 609 if (priv->cls_rules[i].used) 610 rxnfc->rule_cnt++; 611 break; 612 case ETHTOOL_GRXCLSRULE: 613 if (rxnfc->fs.location >= priv->si->num_fs_entries) 614 return -EINVAL; 615 616 /* get entry x */ 617 rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; 618 break; 619 case ETHTOOL_GRXCLSRLALL: 620 /* total number of entries */ 621 rxnfc->data = priv->si->num_fs_entries; 622 /* array of indexes of used entries */ 623 j = 0; 624 for (i = 0; i < priv->si->num_fs_entries; i++) { 625 if (!priv->cls_rules[i].used) 626 continue; 627 if (j == rxnfc->rule_cnt) 628 return -EMSGSIZE; 629 rule_locs[j++] = i; 630 } 631 /* number of entries in use */ 632 rxnfc->rule_cnt = j; 633 break; 634 default: 635 return -EOPNOTSUPP; 636 } 637 638 return 0; 639 } 640 641 static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) 642 { 643 struct enetc_ndev_priv *priv = netdev_priv(ndev); 644 int err; 645 646 switch (rxnfc->cmd) { 647 case ETHTOOL_SRXCLSRLINS: 648 if (rxnfc->fs.location >= priv->si->num_fs_entries) 649 return -EINVAL; 650 651 if (rxnfc->fs.ring_cookie >= priv->num_rx_rings && 652 rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC) 653 return -EINVAL; 654 655 err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true); 656 if (err) 657 return err; 658 priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs; 659 priv->cls_rules[rxnfc->fs.location].used = 1; 660 break; 661 case ETHTOOL_SRXCLSRLDEL: 662 if (rxnfc->fs.location >= priv->si->num_fs_entries) 663 return -EINVAL; 664 665 err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false); 666 if (err) 667 return err; 668 priv->cls_rules[rxnfc->fs.location].used = 0; 669 break; 670 default: 671 return -EOPNOTSUPP; 672 } 673 674 return 0; 675 } 676 677 static u32 enetc_get_rxfh_key_size(struct net_device *ndev) 678 { 679 struct enetc_ndev_priv *priv = netdev_priv(ndev); 680 681 /* return the size of the RX flow hash key. PF only */ 682 return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0; 683 } 684 685 static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) 686 { 687 struct enetc_ndev_priv *priv = netdev_priv(ndev); 688 689 /* return the size of the RX flow hash indirection table */ 690 return priv->si->num_rss; 691 } 692 693 static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, 694 u8 *hfunc) 695 { 696 struct enetc_ndev_priv *priv = netdev_priv(ndev); 697 struct enetc_hw *hw = &priv->si->hw; 698 int err = 0, i; 699 700 /* return hash function */ 701 if (hfunc) 702 *hfunc = ETH_RSS_HASH_TOP; 703 704 /* return hash key */ 705 if (key && hw->port) 706 for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) 707 ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i)); 708 709 /* return RSS table */ 710 if (indir) 711 err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss); 712 713 return err; 714 } 715 716 void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) 717 { 718 int i; 719 720 for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) 721 enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); 722 } 723 EXPORT_SYMBOL_GPL(enetc_set_rss_key); 724 725 static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, 726 const u8 *key, const u8 hfunc) 727 { 728 struct enetc_ndev_priv *priv = netdev_priv(ndev); 729 struct enetc_hw *hw = &priv->si->hw; 730 int err = 0; 731 732 /* set hash key, if PF */ 733 if (key && hw->port) 734 enetc_set_rss_key(hw, key); 735 736 /* set RSS table */ 737 if (indir) 738 err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss); 739 740 return err; 741 } 742 743 static void enetc_get_ringparam(struct net_device *ndev, 744 struct ethtool_ringparam *ring, 745 struct kernel_ethtool_ringparam *kernel_ring, 746 struct netlink_ext_ack *extack) 747 { 748 struct enetc_ndev_priv *priv = netdev_priv(ndev); 749 750 ring->rx_pending = priv->rx_bd_count; 751 ring->tx_pending = priv->tx_bd_count; 752 753 /* do some h/w sanity checks for BDR length */ 754 if (netif_running(ndev)) { 755 struct enetc_hw *hw = &priv->si->hw; 756 u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR); 757 758 if (val != priv->rx_bd_count) 759 netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val); 760 761 val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR); 762 763 if (val != priv->tx_bd_count) 764 netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val); 765 } 766 } 767 768 static int enetc_get_coalesce(struct net_device *ndev, 769 struct ethtool_coalesce *ic, 770 struct kernel_ethtool_coalesce *kernel_coal, 771 struct netlink_ext_ack *extack) 772 { 773 struct enetc_ndev_priv *priv = netdev_priv(ndev); 774 struct enetc_int_vector *v = priv->int_vector[0]; 775 776 ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt); 777 ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt); 778 779 ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; 780 ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; 781 782 ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE; 783 784 return 0; 785 } 786 787 static int enetc_set_coalesce(struct net_device *ndev, 788 struct ethtool_coalesce *ic, 789 struct kernel_ethtool_coalesce *kernel_coal, 790 struct netlink_ext_ack *extack) 791 { 792 struct enetc_ndev_priv *priv = netdev_priv(ndev); 793 u32 rx_ictt, tx_ictt; 794 int i, ic_mode; 795 bool changed; 796 797 tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs); 798 rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs); 799 800 if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR) 801 return -EOPNOTSUPP; 802 803 if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR) 804 return -EOPNOTSUPP; 805 806 ic_mode = ENETC_IC_NONE; 807 if (ic->use_adaptive_rx_coalesce) { 808 ic_mode |= ENETC_IC_RX_ADAPTIVE; 809 rx_ictt = 0x1; 810 } else { 811 ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0; 812 } 813 814 ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0; 815 816 /* commit the settings */ 817 changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt); 818 819 priv->ic_mode = ic_mode; 820 priv->tx_ictt = tx_ictt; 821 822 for (i = 0; i < priv->bdr_int_num; i++) { 823 struct enetc_int_vector *v = priv->int_vector[i]; 824 825 v->rx_ictt = rx_ictt; 826 v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE); 827 } 828 829 if (netif_running(ndev) && changed) { 830 /* reconfigure the operation mode of h/w interrupts, 831 * traffic needs to be paused in the process 832 */ 833 enetc_stop(ndev); 834 enetc_start(ndev); 835 } 836 837 return 0; 838 } 839 840 static int enetc_get_ts_info(struct net_device *ndev, 841 struct ethtool_ts_info *info) 842 { 843 int *phc_idx; 844 845 phc_idx = symbol_get(enetc_phc_index); 846 if (phc_idx) { 847 info->phc_index = *phc_idx; 848 symbol_put(enetc_phc_index); 849 } else { 850 info->phc_index = -1; 851 } 852 853 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK 854 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | 855 SOF_TIMESTAMPING_RX_HARDWARE | 856 SOF_TIMESTAMPING_RAW_HARDWARE | 857 SOF_TIMESTAMPING_TX_SOFTWARE | 858 SOF_TIMESTAMPING_RX_SOFTWARE | 859 SOF_TIMESTAMPING_SOFTWARE; 860 861 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 862 (1 << HWTSTAMP_TX_ON) | 863 (1 << HWTSTAMP_TX_ONESTEP_SYNC); 864 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 865 (1 << HWTSTAMP_FILTER_ALL); 866 #else 867 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | 868 SOF_TIMESTAMPING_TX_SOFTWARE | 869 SOF_TIMESTAMPING_SOFTWARE; 870 #endif 871 return 0; 872 } 873 874 static void enetc_get_wol(struct net_device *dev, 875 struct ethtool_wolinfo *wol) 876 { 877 wol->supported = 0; 878 wol->wolopts = 0; 879 880 if (dev->phydev) 881 phy_ethtool_get_wol(dev->phydev, wol); 882 } 883 884 static int enetc_set_wol(struct net_device *dev, 885 struct ethtool_wolinfo *wol) 886 { 887 int ret; 888 889 if (!dev->phydev) 890 return -EOPNOTSUPP; 891 892 ret = phy_ethtool_set_wol(dev->phydev, wol); 893 if (!ret) 894 device_set_wakeup_enable(&dev->dev, wol->wolopts); 895 896 return ret; 897 } 898 899 static void enetc_get_pauseparam(struct net_device *dev, 900 struct ethtool_pauseparam *pause) 901 { 902 struct enetc_ndev_priv *priv = netdev_priv(dev); 903 904 phylink_ethtool_get_pauseparam(priv->phylink, pause); 905 } 906 907 static int enetc_set_pauseparam(struct net_device *dev, 908 struct ethtool_pauseparam *pause) 909 { 910 struct enetc_ndev_priv *priv = netdev_priv(dev); 911 912 return phylink_ethtool_set_pauseparam(priv->phylink, pause); 913 } 914 915 static int enetc_get_link_ksettings(struct net_device *dev, 916 struct ethtool_link_ksettings *cmd) 917 { 918 struct enetc_ndev_priv *priv = netdev_priv(dev); 919 920 if (!priv->phylink) 921 return -EOPNOTSUPP; 922 923 return phylink_ethtool_ksettings_get(priv->phylink, cmd); 924 } 925 926 static int enetc_set_link_ksettings(struct net_device *dev, 927 const struct ethtool_link_ksettings *cmd) 928 { 929 struct enetc_ndev_priv *priv = netdev_priv(dev); 930 931 if (!priv->phylink) 932 return -EOPNOTSUPP; 933 934 return phylink_ethtool_ksettings_set(priv->phylink, cmd); 935 } 936 937 static void enetc_get_mm_stats(struct net_device *ndev, 938 struct ethtool_mm_stats *s) 939 { 940 struct enetc_ndev_priv *priv = netdev_priv(ndev); 941 struct enetc_hw *hw = &priv->si->hw; 942 struct enetc_si *si = priv->si; 943 944 if (!(si->hw_features & ENETC_SI_F_QBU)) 945 return; 946 947 s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR); 948 s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR); 949 s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR); 950 s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR); 951 s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR); 952 s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR); 953 } 954 955 static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state) 956 { 957 struct enetc_ndev_priv *priv = netdev_priv(ndev); 958 struct enetc_si *si = priv->si; 959 struct enetc_hw *hw = &si->hw; 960 u32 lafs, rafs, val; 961 962 if (!(si->hw_features & ENETC_SI_F_QBU)) 963 return -EOPNOTSUPP; 964 965 mutex_lock(&priv->mm_lock); 966 967 val = enetc_port_rd(hw, ENETC_PFPMR); 968 state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE); 969 970 val = enetc_port_rd(hw, ENETC_MMCSR); 971 972 switch (ENETC_MMCSR_GET_VSTS(val)) { 973 case 0: 974 state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; 975 break; 976 case 2: 977 state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING; 978 break; 979 case 3: 980 state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; 981 break; 982 case 4: 983 state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED; 984 break; 985 case 5: 986 default: 987 state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN; 988 break; 989 } 990 991 rafs = ENETC_MMCSR_GET_RAFS(val); 992 state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs); 993 lafs = ENETC_MMCSR_GET_LAFS(val); 994 state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs); 995 state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */ 996 state->tx_active = state->tx_enabled && 997 (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED || 998 state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED); 999 state->verify_enabled = !(val & ENETC_MMCSR_VDIS); 1000 state->verify_time = ENETC_MMCSR_GET_VT(val); 1001 /* A verifyTime of 128 ms would exceed the 7 bit width 1002 * of the ENETC_MMCSR_VT field 1003 */ 1004 state->max_verify_time = 127; 1005 1006 mutex_unlock(&priv->mm_lock); 1007 1008 return 0; 1009 } 1010 1011 static int enetc_mm_wait_tx_active(struct enetc_hw *hw, int verify_time) 1012 { 1013 int timeout = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES; 1014 u32 val; 1015 1016 /* This will time out after the standard value of 3 verification 1017 * attempts. To not sleep forever, it relies on a non-zero verify_time, 1018 * guarantee which is provided by the ethtool nlattr policy. 1019 */ 1020 return read_poll_timeout(enetc_port_rd, val, 1021 ENETC_MMCSR_GET_VSTS(val) == 3, 1022 ENETC_MM_VERIFY_SLEEP_US, timeout, 1023 true, hw, ENETC_MMCSR); 1024 } 1025 1026 static void enetc_set_ptcfpr(struct enetc_hw *hw, u8 preemptible_tcs) 1027 { 1028 u32 val; 1029 int tc; 1030 1031 for (tc = 0; tc < 8; tc++) { 1032 val = enetc_port_rd(hw, ENETC_PTCFPR(tc)); 1033 1034 if (preemptible_tcs & BIT(tc)) 1035 val |= ENETC_PTCFPR_FPE; 1036 else 1037 val &= ~ENETC_PTCFPR_FPE; 1038 1039 enetc_port_wr(hw, ENETC_PTCFPR(tc), val); 1040 } 1041 } 1042 1043 /* ENETC does not have an IRQ to notify changes to the MAC Merge TX status 1044 * (active/inactive), but the preemptible traffic classes should only be 1045 * committed to hardware once TX is active. Resort to polling. 1046 */ 1047 void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv) 1048 { 1049 struct enetc_hw *hw = &priv->si->hw; 1050 u8 preemptible_tcs = 0; 1051 u32 val; 1052 int err; 1053 1054 val = enetc_port_rd(hw, ENETC_MMCSR); 1055 if (!(val & ENETC_MMCSR_ME)) 1056 goto out; 1057 1058 if (!(val & ENETC_MMCSR_VDIS)) { 1059 err = enetc_mm_wait_tx_active(hw, ENETC_MMCSR_GET_VT(val)); 1060 if (err) 1061 goto out; 1062 } 1063 1064 preemptible_tcs = priv->preemptible_tcs; 1065 out: 1066 enetc_set_ptcfpr(hw, preemptible_tcs); 1067 } 1068 1069 /* FIXME: Workaround for the link partner's verification failing if ENETC 1070 * priorly received too much express traffic. The documentation doesn't 1071 * suggest this is needed. 1072 */ 1073 static void enetc_restart_emac_rx(struct enetc_si *si) 1074 { 1075 u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG); 1076 1077 enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN); 1078 1079 if (val & ENETC_PM0_RX_EN) 1080 enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val); 1081 } 1082 1083 static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, 1084 struct netlink_ext_ack *extack) 1085 { 1086 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1087 struct enetc_hw *hw = &priv->si->hw; 1088 struct enetc_si *si = priv->si; 1089 u32 val, add_frag_size; 1090 int err; 1091 1092 if (!(si->hw_features & ENETC_SI_F_QBU)) 1093 return -EOPNOTSUPP; 1094 1095 err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, 1096 &add_frag_size, extack); 1097 if (err) 1098 return err; 1099 1100 mutex_lock(&priv->mm_lock); 1101 1102 val = enetc_port_rd(hw, ENETC_PFPMR); 1103 if (cfg->pmac_enabled) 1104 val |= ENETC_PFPMR_PMACE; 1105 else 1106 val &= ~ENETC_PFPMR_PMACE; 1107 enetc_port_wr(hw, ENETC_PFPMR, val); 1108 1109 val = enetc_port_rd(hw, ENETC_MMCSR); 1110 1111 if (cfg->verify_enabled) 1112 val &= ~ENETC_MMCSR_VDIS; 1113 else 1114 val |= ENETC_MMCSR_VDIS; 1115 1116 if (cfg->tx_enabled) 1117 priv->active_offloads |= ENETC_F_QBU; 1118 else 1119 priv->active_offloads &= ~ENETC_F_QBU; 1120 1121 /* If link is up, enable/disable MAC Merge right away */ 1122 if (!(val & ENETC_MMCSR_LINK_FAIL)) { 1123 if (!!(priv->active_offloads & ENETC_F_QBU)) 1124 val |= ENETC_MMCSR_ME; 1125 else 1126 val &= ~ENETC_MMCSR_ME; 1127 } 1128 1129 val &= ~ENETC_MMCSR_VT_MASK; 1130 val |= ENETC_MMCSR_VT(cfg->verify_time); 1131 1132 val &= ~ENETC_MMCSR_RAFS_MASK; 1133 val |= ENETC_MMCSR_RAFS(add_frag_size); 1134 1135 enetc_port_wr(hw, ENETC_MMCSR, val); 1136 1137 enetc_restart_emac_rx(priv->si); 1138 1139 enetc_mm_commit_preemptible_tcs(priv); 1140 1141 mutex_unlock(&priv->mm_lock); 1142 1143 return 0; 1144 } 1145 1146 /* When the link is lost, the verification state machine goes to the FAILED 1147 * state and doesn't restart on its own after a new link up event. 1148 * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit 1149 * should have been sufficient to re-trigger verification, but for ENETC it 1150 * doesn't. As a workaround, we need to toggle the Merge Enable bit to 1151 * re-trigger verification when link comes up. 1152 */ 1153 void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link) 1154 { 1155 struct enetc_hw *hw = &priv->si->hw; 1156 u32 val; 1157 1158 mutex_lock(&priv->mm_lock); 1159 1160 val = enetc_port_rd(hw, ENETC_MMCSR); 1161 1162 if (link) { 1163 val &= ~ENETC_MMCSR_LINK_FAIL; 1164 if (priv->active_offloads & ENETC_F_QBU) 1165 val |= ENETC_MMCSR_ME; 1166 } else { 1167 val |= ENETC_MMCSR_LINK_FAIL; 1168 if (priv->active_offloads & ENETC_F_QBU) 1169 val &= ~ENETC_MMCSR_ME; 1170 } 1171 1172 enetc_port_wr(hw, ENETC_MMCSR, val); 1173 1174 enetc_mm_commit_preemptible_tcs(priv); 1175 1176 mutex_unlock(&priv->mm_lock); 1177 } 1178 EXPORT_SYMBOL_GPL(enetc_mm_link_state_update); 1179 1180 static const struct ethtool_ops enetc_pf_ethtool_ops = { 1181 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1182 ETHTOOL_COALESCE_MAX_FRAMES | 1183 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 1184 .get_regs_len = enetc_get_reglen, 1185 .get_regs = enetc_get_regs, 1186 .get_sset_count = enetc_get_sset_count, 1187 .get_strings = enetc_get_strings, 1188 .get_ethtool_stats = enetc_get_ethtool_stats, 1189 .get_pause_stats = enetc_get_pause_stats, 1190 .get_rmon_stats = enetc_get_rmon_stats, 1191 .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats, 1192 .get_eth_mac_stats = enetc_get_eth_mac_stats, 1193 .get_rxnfc = enetc_get_rxnfc, 1194 .set_rxnfc = enetc_set_rxnfc, 1195 .get_rxfh_key_size = enetc_get_rxfh_key_size, 1196 .get_rxfh_indir_size = enetc_get_rxfh_indir_size, 1197 .get_rxfh = enetc_get_rxfh, 1198 .set_rxfh = enetc_set_rxfh, 1199 .get_ringparam = enetc_get_ringparam, 1200 .get_coalesce = enetc_get_coalesce, 1201 .set_coalesce = enetc_set_coalesce, 1202 .get_link_ksettings = enetc_get_link_ksettings, 1203 .set_link_ksettings = enetc_set_link_ksettings, 1204 .get_link = ethtool_op_get_link, 1205 .get_ts_info = enetc_get_ts_info, 1206 .get_wol = enetc_get_wol, 1207 .set_wol = enetc_set_wol, 1208 .get_pauseparam = enetc_get_pauseparam, 1209 .set_pauseparam = enetc_set_pauseparam, 1210 .get_mm = enetc_get_mm, 1211 .set_mm = enetc_set_mm, 1212 .get_mm_stats = enetc_get_mm_stats, 1213 }; 1214 1215 static const struct ethtool_ops enetc_vf_ethtool_ops = { 1216 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1217 ETHTOOL_COALESCE_MAX_FRAMES | 1218 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 1219 .get_regs_len = enetc_get_reglen, 1220 .get_regs = enetc_get_regs, 1221 .get_sset_count = enetc_get_sset_count, 1222 .get_strings = enetc_get_strings, 1223 .get_ethtool_stats = enetc_get_ethtool_stats, 1224 .get_rxnfc = enetc_get_rxnfc, 1225 .set_rxnfc = enetc_set_rxnfc, 1226 .get_rxfh_indir_size = enetc_get_rxfh_indir_size, 1227 .get_rxfh = enetc_get_rxfh, 1228 .set_rxfh = enetc_set_rxfh, 1229 .get_ringparam = enetc_get_ringparam, 1230 .get_coalesce = enetc_get_coalesce, 1231 .set_coalesce = enetc_set_coalesce, 1232 .get_link = ethtool_op_get_link, 1233 .get_ts_info = enetc_get_ts_info, 1234 }; 1235 1236 void enetc_set_ethtool_ops(struct net_device *ndev) 1237 { 1238 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1239 1240 if (enetc_si_is_pf(priv->si)) 1241 ndev->ethtool_ops = &enetc_pf_ethtool_ops; 1242 else 1243 ndev->ethtool_ops = &enetc_vf_ethtool_ops; 1244 } 1245 EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops); 1246