1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Broadcom BCM7xxx System Port Ethernet MAC driver 4 * 5 * Copyright (C) 2014 Broadcom Corporation 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/init.h> 11 #include <linux/interrupt.h> 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/netdevice.h> 15 #include <linux/dsa/brcm.h> 16 #include <linux/etherdevice.h> 17 #include <linux/platform_device.h> 18 #include <linux/of.h> 19 #include <linux/of_net.h> 20 #include <linux/of_mdio.h> 21 #include <linux/phy.h> 22 #include <linux/phy_fixed.h> 23 #include <net/dsa.h> 24 #include <linux/clk.h> 25 #include <net/ip.h> 26 #include <net/ipv6.h> 27 28 #include "bcmsysport.h" 29 30 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact 31 * same layout, except it has been moved by 4 bytes up, *sigh* 32 */ 33 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) 34 { 35 if (priv->is_lite && off >= RDMA_STATUS) 36 off += 4; 37 return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off); 38 } 39 40 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) 41 { 42 if (priv->is_lite && off >= RDMA_STATUS) 43 off += 4; 44 writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off); 45 } 46 47 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) 48 { 49 if (!priv->is_lite) { 50 return BIT(bit); 51 } else { 52 if (bit >= ACB_ALGO) 53 return BIT(bit + 1); 54 else 55 return BIT(bit); 56 } 57 } 58 59 /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied 60 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. 61 */ 62 #define BCM_SYSPORT_INTR_L2(which) \ 63 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ 64 u32 mask) \ 65 { \ 66 priv->irq##which##_mask &= ~(mask); \ 67 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ 68 } \ 69 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ 70 u32 mask) \ 71 { \ 72 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ 73 priv->irq##which##_mask |= (mask); \ 74 } \ 75 76 BCM_SYSPORT_INTR_L2(0) 77 BCM_SYSPORT_INTR_L2(1) 78 79 /* Register accesses to GISB/RBUS registers are expensive (few hundred 80 * nanoseconds), so keep the check for 64-bits explicit here to save 81 * one register write per-packet on 32-bits platforms. 82 */ 83 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, 84 void __iomem *d, 85 dma_addr_t addr) 86 { 87 #ifdef CONFIG_PHYS_ADDR_T_64BIT 88 writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK, 89 d + DESC_ADDR_HI_STATUS_LEN); 90 #endif 91 writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO); 92 } 93 94 /* Ethtool operations */ 95 static void bcm_sysport_set_rx_csum(struct net_device *dev, 96 netdev_features_t wanted) 97 { 98 struct bcm_sysport_priv *priv = netdev_priv(dev); 99 u32 reg; 100 101 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); 102 reg = rxchk_readl(priv, RXCHK_CONTROL); 103 /* Clear L2 header checks, which would prevent BPDUs 104 * from being received. 105 */ 106 reg &= ~RXCHK_L2_HDR_DIS; 107 if (priv->rx_chk_en) 108 reg |= RXCHK_EN; 109 else 110 reg &= ~RXCHK_EN; 111 112 /* If UniMAC forwards CRC, we need to skip over it to get 113 * a valid CHK bit to be set in the per-packet status word 114 */ 115 if (priv->rx_chk_en && priv->crc_fwd) 116 reg |= RXCHK_SKIP_FCS; 117 else 118 reg &= ~RXCHK_SKIP_FCS; 119 120 /* If Broadcom tags are enabled (e.g: using a switch), make 121 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom 122 * tag after the Ethernet MAC Source Address. 123 */ 124 if (netdev_uses_dsa(dev)) 125 reg |= RXCHK_BRCM_TAG_EN; 126 else 127 reg &= ~RXCHK_BRCM_TAG_EN; 128 129 rxchk_writel(priv, reg, RXCHK_CONTROL); 130 } 131 132 static void bcm_sysport_set_tx_csum(struct net_device *dev, 133 netdev_features_t wanted) 134 { 135 struct bcm_sysport_priv *priv = netdev_priv(dev); 136 u32 reg; 137 138 /* Hardware transmit checksum requires us to enable the Transmit status 139 * block prepended to the packet contents 140 */ 141 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 142 NETIF_F_HW_VLAN_CTAG_TX)); 143 reg = tdma_readl(priv, TDMA_CONTROL); 144 if (priv->tsb_en) 145 reg |= tdma_control_bit(priv, TSB_EN); 146 else 147 reg &= ~tdma_control_bit(priv, TSB_EN); 148 /* Indicating that software inserts Broadcom tags is needed for the TX 149 * checksum to be computed correctly when using VLAN HW acceleration, 150 * else it has no effect, so it can always be turned on. 151 */ 152 if (netdev_uses_dsa(dev)) 153 reg |= tdma_control_bit(priv, SW_BRCM_TAG); 154 else 155 reg &= ~tdma_control_bit(priv, SW_BRCM_TAG); 156 tdma_writel(priv, reg, TDMA_CONTROL); 157 158 /* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */ 159 if (wanted & NETIF_F_HW_VLAN_CTAG_TX) 160 tdma_writel(priv, ETH_P_8021Q, TDMA_TPID); 161 } 162 163 static int bcm_sysport_set_features(struct net_device *dev, 164 netdev_features_t features) 165 { 166 struct bcm_sysport_priv *priv = netdev_priv(dev); 167 int ret; 168 169 ret = clk_prepare_enable(priv->clk); 170 if (ret) 171 return ret; 172 173 /* Read CRC forward */ 174 if (!priv->is_lite) 175 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); 176 else 177 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & 178 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); 179 180 bcm_sysport_set_rx_csum(dev, features); 181 bcm_sysport_set_tx_csum(dev, features); 182 183 clk_disable_unprepare(priv->clk); 184 185 return 0; 186 } 187 188 /* Hardware counters must be kept in sync because the order/offset 189 * is important here (order in structure declaration = order in hardware) 190 */ 191 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { 192 /* general stats */ 193 STAT_NETDEV64(rx_packets), 194 STAT_NETDEV64(tx_packets), 195 STAT_NETDEV64(rx_bytes), 196 STAT_NETDEV64(tx_bytes), 197 STAT_NETDEV(rx_errors), 198 STAT_NETDEV(tx_errors), 199 STAT_NETDEV(rx_dropped), 200 STAT_NETDEV(tx_dropped), 201 STAT_NETDEV(multicast), 202 /* UniMAC RSV counters */ 203 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 204 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), 205 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), 206 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 207 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 208 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 209 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 210 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 211 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 212 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 213 STAT_MIB_RX("rx_pkts", mib.rx.pkt), 214 STAT_MIB_RX("rx_bytes", mib.rx.bytes), 215 STAT_MIB_RX("rx_multicast", mib.rx.mca), 216 STAT_MIB_RX("rx_broadcast", mib.rx.bca), 217 STAT_MIB_RX("rx_fcs", mib.rx.fcs), 218 STAT_MIB_RX("rx_control", mib.rx.cf), 219 STAT_MIB_RX("rx_pause", mib.rx.pf), 220 STAT_MIB_RX("rx_unknown", mib.rx.uo), 221 STAT_MIB_RX("rx_align", mib.rx.aln), 222 STAT_MIB_RX("rx_outrange", mib.rx.flr), 223 STAT_MIB_RX("rx_code", mib.rx.cde), 224 STAT_MIB_RX("rx_carrier", mib.rx.fcr), 225 STAT_MIB_RX("rx_oversize", mib.rx.ovr), 226 STAT_MIB_RX("rx_jabber", mib.rx.jbr), 227 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), 228 STAT_MIB_RX("rx_good_pkts", mib.rx.pok), 229 STAT_MIB_RX("rx_unicast", mib.rx.uc), 230 STAT_MIB_RX("rx_ppp", mib.rx.ppp), 231 STAT_MIB_RX("rx_crc", mib.rx.rcrc), 232 /* UniMAC TSV counters */ 233 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 234 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 235 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 236 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 237 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 238 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 239 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 240 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 241 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 242 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 243 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 244 STAT_MIB_TX("tx_multicast", mib.tx.mca), 245 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 246 STAT_MIB_TX("tx_pause", mib.tx.pf), 247 STAT_MIB_TX("tx_control", mib.tx.cf), 248 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 249 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 250 STAT_MIB_TX("tx_defer", mib.tx.drf), 251 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 252 STAT_MIB_TX("tx_single_col", mib.tx.scl), 253 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 254 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 255 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 256 STAT_MIB_TX("tx_frags", mib.tx.frg), 257 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 258 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 259 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 260 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 261 STAT_MIB_TX("tx_unicast", mib.tx.uc), 262 /* UniMAC RUNT counters */ 263 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 264 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 265 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 266 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 267 /* RXCHK misc statistics */ 268 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), 269 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, 270 RXCHK_OTHER_DISC_CNTR), 271 /* RBUF misc statistics */ 272 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 273 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 274 /* RDMA misc statistics */ 275 STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR), 276 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 277 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), 278 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), 279 STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb), 280 STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed), 281 /* Per TX-queue statistics are dynamically appended */ 282 }; 283 284 #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 285 286 static void bcm_sysport_get_drvinfo(struct net_device *dev, 287 struct ethtool_drvinfo *info) 288 { 289 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 290 strscpy(info->bus_info, "platform", sizeof(info->bus_info)); 291 } 292 293 static u32 bcm_sysport_get_msglvl(struct net_device *dev) 294 { 295 struct bcm_sysport_priv *priv = netdev_priv(dev); 296 297 return priv->msg_enable; 298 } 299 300 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) 301 { 302 struct bcm_sysport_priv *priv = netdev_priv(dev); 303 304 priv->msg_enable = enable; 305 } 306 307 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) 308 { 309 switch (type) { 310 case BCM_SYSPORT_STAT_NETDEV: 311 case BCM_SYSPORT_STAT_NETDEV64: 312 case BCM_SYSPORT_STAT_RXCHK: 313 case BCM_SYSPORT_STAT_RBUF: 314 case BCM_SYSPORT_STAT_RDMA: 315 case BCM_SYSPORT_STAT_SOFT: 316 return true; 317 default: 318 return false; 319 } 320 } 321 322 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) 323 { 324 struct bcm_sysport_priv *priv = netdev_priv(dev); 325 const struct bcm_sysport_stats *s; 326 unsigned int i, j; 327 328 switch (string_set) { 329 case ETH_SS_STATS: 330 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 331 s = &bcm_sysport_gstrings_stats[i]; 332 if (priv->is_lite && 333 !bcm_sysport_lite_stat_valid(s->type)) 334 continue; 335 j++; 336 } 337 /* Include per-queue statistics */ 338 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 339 default: 340 return -EOPNOTSUPP; 341 } 342 } 343 344 static void bcm_sysport_get_strings(struct net_device *dev, 345 u32 stringset, u8 *data) 346 { 347 struct bcm_sysport_priv *priv = netdev_priv(dev); 348 const struct bcm_sysport_stats *s; 349 char buf[128]; 350 int i, j; 351 352 switch (stringset) { 353 case ETH_SS_STATS: 354 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 355 s = &bcm_sysport_gstrings_stats[i]; 356 if (priv->is_lite && 357 !bcm_sysport_lite_stat_valid(s->type)) 358 continue; 359 360 memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, 361 ETH_GSTRING_LEN); 362 j++; 363 } 364 365 for (i = 0; i < dev->num_tx_queues; i++) { 366 snprintf(buf, sizeof(buf), "txq%d_packets", i); 367 memcpy(data + j * ETH_GSTRING_LEN, buf, 368 ETH_GSTRING_LEN); 369 j++; 370 371 snprintf(buf, sizeof(buf), "txq%d_bytes", i); 372 memcpy(data + j * ETH_GSTRING_LEN, buf, 373 ETH_GSTRING_LEN); 374 j++; 375 } 376 break; 377 default: 378 break; 379 } 380 } 381 382 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) 383 { 384 int i, j = 0; 385 386 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 387 const struct bcm_sysport_stats *s; 388 u8 offset = 0; 389 u32 val = 0; 390 char *p; 391 392 s = &bcm_sysport_gstrings_stats[i]; 393 switch (s->type) { 394 case BCM_SYSPORT_STAT_NETDEV: 395 case BCM_SYSPORT_STAT_NETDEV64: 396 case BCM_SYSPORT_STAT_SOFT: 397 continue; 398 case BCM_SYSPORT_STAT_MIB_RX: 399 case BCM_SYSPORT_STAT_MIB_TX: 400 case BCM_SYSPORT_STAT_RUNT: 401 if (priv->is_lite) 402 continue; 403 404 if (s->type != BCM_SYSPORT_STAT_MIB_RX) 405 offset = UMAC_MIB_STAT_OFFSET; 406 val = umac_readl(priv, UMAC_MIB_START + j + offset); 407 break; 408 case BCM_SYSPORT_STAT_RXCHK: 409 val = rxchk_readl(priv, s->reg_offset); 410 if (val == ~0) 411 rxchk_writel(priv, 0, s->reg_offset); 412 break; 413 case BCM_SYSPORT_STAT_RBUF: 414 val = rbuf_readl(priv, s->reg_offset); 415 if (val == ~0) 416 rbuf_writel(priv, 0, s->reg_offset); 417 break; 418 case BCM_SYSPORT_STAT_RDMA: 419 if (!priv->is_lite) 420 continue; 421 422 val = rdma_readl(priv, s->reg_offset); 423 if (val == ~0) 424 rdma_writel(priv, 0, s->reg_offset); 425 break; 426 } 427 428 j += s->stat_sizeof; 429 p = (char *)priv + s->stat_offset; 430 *(u32 *)p = val; 431 } 432 433 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); 434 } 435 436 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, 437 u64 *tx_bytes, u64 *tx_packets) 438 { 439 struct bcm_sysport_tx_ring *ring; 440 u64 bytes = 0, packets = 0; 441 unsigned int start; 442 unsigned int q; 443 444 for (q = 0; q < priv->netdev->num_tx_queues; q++) { 445 ring = &priv->tx_rings[q]; 446 do { 447 start = u64_stats_fetch_begin(&priv->syncp); 448 bytes = ring->bytes; 449 packets = ring->packets; 450 } while (u64_stats_fetch_retry(&priv->syncp, start)); 451 452 *tx_bytes += bytes; 453 *tx_packets += packets; 454 } 455 } 456 457 static void bcm_sysport_get_stats(struct net_device *dev, 458 struct ethtool_stats *stats, u64 *data) 459 { 460 struct bcm_sysport_priv *priv = netdev_priv(dev); 461 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 462 struct u64_stats_sync *syncp = &priv->syncp; 463 struct bcm_sysport_tx_ring *ring; 464 u64 tx_bytes = 0, tx_packets = 0; 465 unsigned int start; 466 int i, j; 467 468 if (netif_running(dev)) { 469 bcm_sysport_update_mib_counters(priv); 470 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); 471 stats64->tx_bytes = tx_bytes; 472 stats64->tx_packets = tx_packets; 473 } 474 475 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 476 const struct bcm_sysport_stats *s; 477 char *p; 478 479 s = &bcm_sysport_gstrings_stats[i]; 480 if (s->type == BCM_SYSPORT_STAT_NETDEV) 481 p = (char *)&dev->stats; 482 else if (s->type == BCM_SYSPORT_STAT_NETDEV64) 483 p = (char *)stats64; 484 else 485 p = (char *)priv; 486 487 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) 488 continue; 489 p += s->stat_offset; 490 491 if (s->stat_sizeof == sizeof(u64) && 492 s->type == BCM_SYSPORT_STAT_NETDEV64) { 493 do { 494 start = u64_stats_fetch_begin(syncp); 495 data[i] = *(u64 *)p; 496 } while (u64_stats_fetch_retry(syncp, start)); 497 } else 498 data[i] = *(u32 *)p; 499 j++; 500 } 501 502 /* For SYSTEMPORT Lite since we have holes in our statistics, j would 503 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it 504 * needs to point to how many total statistics we have minus the 505 * number of per TX queue statistics 506 */ 507 j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) - 508 dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; 509 510 for (i = 0; i < dev->num_tx_queues; i++) { 511 ring = &priv->tx_rings[i]; 512 data[j] = ring->packets; 513 j++; 514 data[j] = ring->bytes; 515 j++; 516 } 517 } 518 519 static void bcm_sysport_get_wol(struct net_device *dev, 520 struct ethtool_wolinfo *wol) 521 { 522 struct bcm_sysport_priv *priv = netdev_priv(dev); 523 524 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 525 wol->wolopts = priv->wolopts; 526 527 if (!(priv->wolopts & WAKE_MAGICSECURE)) 528 return; 529 530 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); 531 } 532 533 static int bcm_sysport_set_wol(struct net_device *dev, 534 struct ethtool_wolinfo *wol) 535 { 536 struct bcm_sysport_priv *priv = netdev_priv(dev); 537 struct device *kdev = &priv->pdev->dev; 538 u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 539 540 if (!device_can_wakeup(kdev)) 541 return -ENOTSUPP; 542 543 if (wol->wolopts & ~supported) 544 return -EINVAL; 545 546 if (wol->wolopts & WAKE_MAGICSECURE) 547 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); 548 549 /* Flag the device and relevant IRQ as wakeup capable */ 550 if (wol->wolopts) { 551 device_set_wakeup_enable(kdev, 1); 552 if (priv->wol_irq_disabled) 553 enable_irq_wake(priv->wol_irq); 554 priv->wol_irq_disabled = 0; 555 } else { 556 device_set_wakeup_enable(kdev, 0); 557 /* Avoid unbalanced disable_irq_wake calls */ 558 if (!priv->wol_irq_disabled) 559 disable_irq_wake(priv->wol_irq); 560 priv->wol_irq_disabled = 1; 561 } 562 563 priv->wolopts = wol->wolopts; 564 565 return 0; 566 } 567 568 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv, 569 u32 usecs, u32 pkts) 570 { 571 u32 reg; 572 573 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 574 reg &= ~(RDMA_INTR_THRESH_MASK | 575 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); 576 reg |= pkts; 577 reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT; 578 rdma_writel(priv, reg, RDMA_MBDONE_INTR); 579 } 580 581 static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, 582 struct ethtool_coalesce *ec) 583 { 584 struct bcm_sysport_priv *priv = ring->priv; 585 u32 reg; 586 587 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index)); 588 reg &= ~(RING_INTR_THRESH_MASK | 589 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); 590 reg |= ec->tx_max_coalesced_frames; 591 reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << 592 RING_TIMEOUT_SHIFT; 593 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index)); 594 } 595 596 static int bcm_sysport_get_coalesce(struct net_device *dev, 597 struct ethtool_coalesce *ec, 598 struct kernel_ethtool_coalesce *kernel_coal, 599 struct netlink_ext_ack *extack) 600 { 601 struct bcm_sysport_priv *priv = netdev_priv(dev); 602 u32 reg; 603 604 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0)); 605 606 ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000; 607 ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK; 608 609 reg = rdma_readl(priv, RDMA_MBDONE_INTR); 610 611 ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; 612 ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; 613 ec->use_adaptive_rx_coalesce = priv->dim.use_dim; 614 615 return 0; 616 } 617 618 static int bcm_sysport_set_coalesce(struct net_device *dev, 619 struct ethtool_coalesce *ec, 620 struct kernel_ethtool_coalesce *kernel_coal, 621 struct netlink_ext_ack *extack) 622 { 623 struct bcm_sysport_priv *priv = netdev_priv(dev); 624 struct dim_cq_moder moder; 625 u32 usecs, pkts; 626 unsigned int i; 627 628 /* Base system clock is 125Mhz, DMA timeout is this reference clock 629 * divided by 1024, which yield roughly 8.192 us, our maximum value has 630 * to fit in the RING_TIMEOUT_MASK (16 bits). 631 */ 632 if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK || 633 ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 || 634 ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK || 635 ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1) 636 return -EINVAL; 637 638 if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || 639 (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)) 640 return -EINVAL; 641 642 for (i = 0; i < dev->num_tx_queues; i++) 643 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); 644 645 priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; 646 priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 647 usecs = priv->rx_coalesce_usecs; 648 pkts = priv->rx_max_coalesced_frames; 649 650 if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { 651 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode); 652 usecs = moder.usec; 653 pkts = moder.pkts; 654 } 655 656 priv->dim.use_dim = ec->use_adaptive_rx_coalesce; 657 658 /* Apply desired coalescing parameters */ 659 bcm_sysport_set_rx_coalesce(priv, usecs, pkts); 660 661 return 0; 662 } 663 664 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) 665 { 666 dev_consume_skb_any(cb->skb); 667 cb->skb = NULL; 668 dma_unmap_addr_set(cb, dma_addr, 0); 669 } 670 671 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, 672 struct bcm_sysport_cb *cb) 673 { 674 struct device *kdev = &priv->pdev->dev; 675 struct net_device *ndev = priv->netdev; 676 struct sk_buff *skb, *rx_skb; 677 dma_addr_t mapping; 678 679 /* Allocate a new SKB for a new packet */ 680 skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH, 681 GFP_ATOMIC | __GFP_NOWARN); 682 if (!skb) { 683 priv->mib.alloc_rx_buff_failed++; 684 netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); 685 return NULL; 686 } 687 688 mapping = dma_map_single(kdev, skb->data, 689 RX_BUF_LENGTH, DMA_FROM_DEVICE); 690 if (dma_mapping_error(kdev, mapping)) { 691 priv->mib.rx_dma_failed++; 692 dev_kfree_skb_any(skb); 693 netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); 694 return NULL; 695 } 696 697 /* Grab the current SKB on the ring */ 698 rx_skb = cb->skb; 699 if (likely(rx_skb)) 700 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 701 RX_BUF_LENGTH, DMA_FROM_DEVICE); 702 703 /* Put the new SKB on the ring */ 704 cb->skb = skb; 705 dma_unmap_addr_set(cb, dma_addr, mapping); 706 dma_desc_set_addr(priv, cb->bd_addr, mapping); 707 708 netif_dbg(priv, rx_status, ndev, "RX refill\n"); 709 710 /* Return the current SKB to the caller */ 711 return rx_skb; 712 } 713 714 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) 715 { 716 struct bcm_sysport_cb *cb; 717 struct sk_buff *skb; 718 unsigned int i; 719 720 for (i = 0; i < priv->num_rx_bds; i++) { 721 cb = &priv->rx_cbs[i]; 722 skb = bcm_sysport_rx_refill(priv, cb); 723 dev_kfree_skb(skb); 724 if (!cb->skb) 725 return -ENOMEM; 726 } 727 728 return 0; 729 } 730 731 /* Poll the hardware for up to budget packets to process */ 732 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, 733 unsigned int budget) 734 { 735 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 736 struct net_device *ndev = priv->netdev; 737 unsigned int processed = 0, to_process; 738 unsigned int processed_bytes = 0; 739 struct bcm_sysport_cb *cb; 740 struct sk_buff *skb; 741 unsigned int p_index; 742 u16 len, status; 743 struct bcm_rsb *rsb; 744 745 /* Clear status before servicing to reduce spurious interrupts */ 746 intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR); 747 748 /* Determine how much we should process since last call, SYSTEMPORT Lite 749 * groups the producer and consumer indexes into the same 32-bit 750 * which we access using RDMA_CONS_INDEX 751 */ 752 if (!priv->is_lite) 753 p_index = rdma_readl(priv, RDMA_PROD_INDEX); 754 else 755 p_index = rdma_readl(priv, RDMA_CONS_INDEX); 756 p_index &= RDMA_PROD_INDEX_MASK; 757 758 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK; 759 760 netif_dbg(priv, rx_status, ndev, 761 "p_index=%d rx_c_index=%d to_process=%d\n", 762 p_index, priv->rx_c_index, to_process); 763 764 while ((processed < to_process) && (processed < budget)) { 765 cb = &priv->rx_cbs[priv->rx_read_ptr]; 766 skb = bcm_sysport_rx_refill(priv, cb); 767 768 769 /* We do not have a backing SKB, so we do not a corresponding 770 * DMA mapping for this incoming packet since 771 * bcm_sysport_rx_refill always either has both skb and mapping 772 * or none. 773 */ 774 if (unlikely(!skb)) { 775 netif_err(priv, rx_err, ndev, "out of memory!\n"); 776 ndev->stats.rx_dropped++; 777 ndev->stats.rx_errors++; 778 goto next; 779 } 780 781 /* Extract the Receive Status Block prepended */ 782 rsb = (struct bcm_rsb *)skb->data; 783 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; 784 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & 785 DESC_STATUS_MASK; 786 787 netif_dbg(priv, rx_status, ndev, 788 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", 789 p_index, priv->rx_c_index, priv->rx_read_ptr, 790 len, status); 791 792 if (unlikely(len > RX_BUF_LENGTH)) { 793 netif_err(priv, rx_status, ndev, "oversized packet\n"); 794 ndev->stats.rx_length_errors++; 795 ndev->stats.rx_errors++; 796 dev_kfree_skb_any(skb); 797 goto next; 798 } 799 800 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { 801 netif_err(priv, rx_status, ndev, "fragmented packet!\n"); 802 ndev->stats.rx_dropped++; 803 ndev->stats.rx_errors++; 804 dev_kfree_skb_any(skb); 805 goto next; 806 } 807 808 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { 809 netif_err(priv, rx_err, ndev, "error packet\n"); 810 if (status & RX_STATUS_OVFLOW) 811 ndev->stats.rx_over_errors++; 812 ndev->stats.rx_dropped++; 813 ndev->stats.rx_errors++; 814 dev_kfree_skb_any(skb); 815 goto next; 816 } 817 818 skb_put(skb, len); 819 820 /* Hardware validated our checksum */ 821 if (likely(status & DESC_L4_CSUM)) 822 skb->ip_summed = CHECKSUM_UNNECESSARY; 823 824 /* Hardware pre-pends packets with 2bytes before Ethernet 825 * header plus we have the Receive Status Block, strip off all 826 * of this from the SKB. 827 */ 828 skb_pull(skb, sizeof(*rsb) + 2); 829 len -= (sizeof(*rsb) + 2); 830 processed_bytes += len; 831 832 /* UniMAC may forward CRC */ 833 if (priv->crc_fwd) { 834 skb_trim(skb, len - ETH_FCS_LEN); 835 len -= ETH_FCS_LEN; 836 } 837 838 skb->protocol = eth_type_trans(skb, ndev); 839 ndev->stats.rx_packets++; 840 ndev->stats.rx_bytes += len; 841 u64_stats_update_begin(&priv->syncp); 842 stats64->rx_packets++; 843 stats64->rx_bytes += len; 844 u64_stats_update_end(&priv->syncp); 845 846 napi_gro_receive(&priv->napi, skb); 847 next: 848 processed++; 849 priv->rx_read_ptr++; 850 851 if (priv->rx_read_ptr == priv->num_rx_bds) 852 priv->rx_read_ptr = 0; 853 } 854 855 priv->dim.packets = processed; 856 priv->dim.bytes = processed_bytes; 857 858 return processed; 859 } 860 861 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, 862 struct bcm_sysport_cb *cb, 863 unsigned int *bytes_compl, 864 unsigned int *pkts_compl) 865 { 866 struct bcm_sysport_priv *priv = ring->priv; 867 struct device *kdev = &priv->pdev->dev; 868 869 if (cb->skb) { 870 *bytes_compl += cb->skb->len; 871 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 872 dma_unmap_len(cb, dma_len), 873 DMA_TO_DEVICE); 874 (*pkts_compl)++; 875 bcm_sysport_free_cb(cb); 876 /* SKB fragment */ 877 } else if (dma_unmap_addr(cb, dma_addr)) { 878 *bytes_compl += dma_unmap_len(cb, dma_len); 879 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), 880 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); 881 dma_unmap_addr_set(cb, dma_addr, 0); 882 } 883 } 884 885 /* Reclaim queued SKBs for transmission completion, lockless version */ 886 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 887 struct bcm_sysport_tx_ring *ring) 888 { 889 unsigned int pkts_compl = 0, bytes_compl = 0; 890 struct net_device *ndev = priv->netdev; 891 unsigned int txbds_processed = 0; 892 struct bcm_sysport_cb *cb; 893 unsigned int txbds_ready; 894 unsigned int c_index; 895 u32 hw_ind; 896 897 /* Clear status before servicing to reduce spurious interrupts */ 898 if (!ring->priv->is_lite) 899 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR); 900 else 901 intrl2_0_writel(ring->priv, BIT(ring->index + 902 INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR); 903 904 /* Compute how many descriptors have been processed since last call */ 905 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 906 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 907 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; 908 909 netif_dbg(priv, tx_done, ndev, 910 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", 911 ring->index, ring->c_index, c_index, txbds_ready); 912 913 while (txbds_processed < txbds_ready) { 914 cb = &ring->cbs[ring->clean_index]; 915 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 916 917 ring->desc_count++; 918 txbds_processed++; 919 920 if (likely(ring->clean_index < ring->size - 1)) 921 ring->clean_index++; 922 else 923 ring->clean_index = 0; 924 } 925 926 u64_stats_update_begin(&priv->syncp); 927 ring->packets += pkts_compl; 928 ring->bytes += bytes_compl; 929 u64_stats_update_end(&priv->syncp); 930 931 ring->c_index = c_index; 932 933 netif_dbg(priv, tx_done, ndev, 934 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", 935 ring->index, ring->c_index, pkts_compl, bytes_compl); 936 937 return pkts_compl; 938 } 939 940 /* Locked version of the per-ring TX reclaim routine */ 941 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 942 struct bcm_sysport_tx_ring *ring) 943 { 944 struct netdev_queue *txq; 945 unsigned int released; 946 unsigned long flags; 947 948 txq = netdev_get_tx_queue(priv->netdev, ring->index); 949 950 spin_lock_irqsave(&ring->lock, flags); 951 released = __bcm_sysport_tx_reclaim(priv, ring); 952 if (released) 953 netif_tx_wake_queue(txq); 954 955 spin_unlock_irqrestore(&ring->lock, flags); 956 957 return released; 958 } 959 960 /* Locked version of the per-ring TX reclaim, but does not wake the queue */ 961 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv, 962 struct bcm_sysport_tx_ring *ring) 963 { 964 unsigned long flags; 965 966 spin_lock_irqsave(&ring->lock, flags); 967 __bcm_sysport_tx_reclaim(priv, ring); 968 spin_unlock_irqrestore(&ring->lock, flags); 969 } 970 971 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) 972 { 973 struct bcm_sysport_tx_ring *ring = 974 container_of(napi, struct bcm_sysport_tx_ring, napi); 975 unsigned int work_done = 0; 976 977 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 978 979 if (work_done == 0) { 980 napi_complete(napi); 981 /* re-enable TX interrupt */ 982 if (!ring->priv->is_lite) 983 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 984 else 985 intrl2_0_mask_clear(ring->priv, BIT(ring->index + 986 INTRL2_0_TDMA_MBDONE_SHIFT)); 987 988 return 0; 989 } 990 991 return budget; 992 } 993 994 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 995 { 996 unsigned int q; 997 998 for (q = 0; q < priv->netdev->num_tx_queues; q++) 999 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); 1000 } 1001 1002 static int bcm_sysport_poll(struct napi_struct *napi, int budget) 1003 { 1004 struct bcm_sysport_priv *priv = 1005 container_of(napi, struct bcm_sysport_priv, napi); 1006 struct dim_sample dim_sample = {}; 1007 unsigned int work_done = 0; 1008 1009 work_done = bcm_sysport_desc_rx(priv, budget); 1010 1011 priv->rx_c_index += work_done; 1012 priv->rx_c_index &= RDMA_CONS_INDEX_MASK; 1013 1014 /* SYSTEMPORT Lite groups the producer/consumer index, producer is 1015 * maintained by HW, but writes to it will be ignore while RDMA 1016 * is active 1017 */ 1018 if (!priv->is_lite) 1019 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); 1020 else 1021 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); 1022 1023 if (work_done < budget) { 1024 napi_complete_done(napi, work_done); 1025 /* re-enable RX interrupts */ 1026 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); 1027 } 1028 1029 if (priv->dim.use_dim) { 1030 dim_update_sample(priv->dim.event_ctr, priv->dim.packets, 1031 priv->dim.bytes, &dim_sample); 1032 net_dim(&priv->dim.dim, dim_sample); 1033 } 1034 1035 return work_done; 1036 } 1037 1038 static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) 1039 { 1040 u32 reg, bit; 1041 1042 reg = umac_readl(priv, UMAC_MPD_CTRL); 1043 if (enable) 1044 reg |= MPD_EN; 1045 else 1046 reg &= ~MPD_EN; 1047 umac_writel(priv, reg, UMAC_MPD_CTRL); 1048 1049 if (priv->is_lite) 1050 bit = RBUF_ACPI_EN_LITE; 1051 else 1052 bit = RBUF_ACPI_EN; 1053 1054 reg = rbuf_readl(priv, RBUF_CONTROL); 1055 if (enable) 1056 reg |= bit; 1057 else 1058 reg &= ~bit; 1059 rbuf_writel(priv, reg, RBUF_CONTROL); 1060 } 1061 1062 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) 1063 { 1064 unsigned int index; 1065 u32 reg; 1066 1067 /* Disable RXCHK, active filters and Broadcom tag matching */ 1068 reg = rxchk_readl(priv, RXCHK_CONTROL); 1069 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 1070 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN); 1071 rxchk_writel(priv, reg, RXCHK_CONTROL); 1072 1073 /* Make sure we restore correct CID index in case HW lost 1074 * its context during deep idle state 1075 */ 1076 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 1077 rxchk_writel(priv, priv->filters_loc[index] << 1078 RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index)); 1079 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); 1080 } 1081 1082 /* Clear the MagicPacket detection logic */ 1083 mpd_enable_set(priv, false); 1084 1085 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS); 1086 if (reg & INTRL2_0_MPD) 1087 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); 1088 1089 if (reg & INTRL2_0_BRCM_MATCH_TAG) { 1090 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) & 1091 RXCHK_BRCM_TAG_MATCH_MASK; 1092 netdev_info(priv->netdev, 1093 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); 1094 } 1095 1096 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); 1097 } 1098 1099 static void bcm_sysport_dim_work(struct work_struct *work) 1100 { 1101 struct dim *dim = container_of(work, struct dim, work); 1102 struct bcm_sysport_net_dim *ndim = 1103 container_of(dim, struct bcm_sysport_net_dim, dim); 1104 struct bcm_sysport_priv *priv = 1105 container_of(ndim, struct bcm_sysport_priv, dim); 1106 struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode, 1107 dim->profile_ix); 1108 1109 bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); 1110 dim->state = DIM_START_MEASURE; 1111 } 1112 1113 /* RX and misc interrupt routine */ 1114 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) 1115 { 1116 struct net_device *dev = dev_id; 1117 struct bcm_sysport_priv *priv = netdev_priv(dev); 1118 struct bcm_sysport_tx_ring *txr; 1119 unsigned int ring, ring_bit; 1120 1121 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 1122 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 1123 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); 1124 1125 if (unlikely(priv->irq0_stat == 0)) { 1126 netdev_warn(priv->netdev, "spurious RX interrupt\n"); 1127 return IRQ_NONE; 1128 } 1129 1130 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { 1131 priv->dim.event_ctr++; 1132 if (likely(napi_schedule_prep(&priv->napi))) { 1133 /* disable RX interrupts */ 1134 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); 1135 __napi_schedule_irqoff(&priv->napi); 1136 } 1137 } 1138 1139 /* TX ring is full, perform a full reclaim since we do not know 1140 * which one would trigger this interrupt 1141 */ 1142 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) 1143 bcm_sysport_tx_reclaim_all(priv); 1144 1145 if (!priv->is_lite) 1146 goto out; 1147 1148 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1149 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); 1150 if (!(priv->irq0_stat & ring_bit)) 1151 continue; 1152 1153 txr = &priv->tx_rings[ring]; 1154 1155 if (likely(napi_schedule_prep(&txr->napi))) { 1156 intrl2_0_mask_set(priv, ring_bit); 1157 __napi_schedule(&txr->napi); 1158 } 1159 } 1160 out: 1161 return IRQ_HANDLED; 1162 } 1163 1164 /* TX interrupt service routine */ 1165 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) 1166 { 1167 struct net_device *dev = dev_id; 1168 struct bcm_sysport_priv *priv = netdev_priv(dev); 1169 struct bcm_sysport_tx_ring *txr; 1170 unsigned int ring; 1171 1172 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & 1173 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 1174 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1175 1176 if (unlikely(priv->irq1_stat == 0)) { 1177 netdev_warn(priv->netdev, "spurious TX interrupt\n"); 1178 return IRQ_NONE; 1179 } 1180 1181 for (ring = 0; ring < dev->num_tx_queues; ring++) { 1182 if (!(priv->irq1_stat & BIT(ring))) 1183 continue; 1184 1185 txr = &priv->tx_rings[ring]; 1186 1187 if (likely(napi_schedule_prep(&txr->napi))) { 1188 intrl2_1_mask_set(priv, BIT(ring)); 1189 __napi_schedule_irqoff(&txr->napi); 1190 } 1191 } 1192 1193 return IRQ_HANDLED; 1194 } 1195 1196 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) 1197 { 1198 struct bcm_sysport_priv *priv = dev_id; 1199 1200 pm_wakeup_event(&priv->pdev->dev, 0); 1201 1202 return IRQ_HANDLED; 1203 } 1204 1205 #ifdef CONFIG_NET_POLL_CONTROLLER 1206 static void bcm_sysport_poll_controller(struct net_device *dev) 1207 { 1208 struct bcm_sysport_priv *priv = netdev_priv(dev); 1209 1210 disable_irq(priv->irq0); 1211 bcm_sysport_rx_isr(priv->irq0, priv); 1212 enable_irq(priv->irq0); 1213 1214 if (!priv->is_lite) { 1215 disable_irq(priv->irq1); 1216 bcm_sysport_tx_isr(priv->irq1, priv); 1217 enable_irq(priv->irq1); 1218 } 1219 } 1220 #endif 1221 1222 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, 1223 struct net_device *dev) 1224 { 1225 struct bcm_sysport_priv *priv = netdev_priv(dev); 1226 struct sk_buff *nskb; 1227 struct bcm_tsb *tsb; 1228 u32 csum_info; 1229 u8 ip_proto; 1230 u16 csum_start; 1231 __be16 ip_ver; 1232 1233 /* Re-allocate SKB if needed */ 1234 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { 1235 nskb = skb_realloc_headroom(skb, sizeof(*tsb)); 1236 if (!nskb) { 1237 dev_kfree_skb_any(skb); 1238 priv->mib.tx_realloc_tsb_failed++; 1239 dev->stats.tx_errors++; 1240 dev->stats.tx_dropped++; 1241 return NULL; 1242 } 1243 dev_consume_skb_any(skb); 1244 skb = nskb; 1245 priv->mib.tx_realloc_tsb++; 1246 } 1247 1248 tsb = skb_push(skb, sizeof(*tsb)); 1249 /* Zero-out TSB by default */ 1250 memset(tsb, 0, sizeof(*tsb)); 1251 1252 if (skb_vlan_tag_present(skb)) { 1253 tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK; 1254 tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT; 1255 } 1256 1257 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1258 ip_ver = skb->protocol; 1259 switch (ip_ver) { 1260 case htons(ETH_P_IP): 1261 ip_proto = ip_hdr(skb)->protocol; 1262 break; 1263 case htons(ETH_P_IPV6): 1264 ip_proto = ipv6_hdr(skb)->nexthdr; 1265 break; 1266 default: 1267 return skb; 1268 } 1269 1270 /* Get the checksum offset and the L4 (transport) offset */ 1271 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); 1272 /* Account for the HW inserted VLAN tag */ 1273 if (skb_vlan_tag_present(skb)) 1274 csum_start += VLAN_HLEN; 1275 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; 1276 csum_info |= (csum_start << L4_PTR_SHIFT); 1277 1278 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1279 csum_info |= L4_LENGTH_VALID; 1280 if (ip_proto == IPPROTO_UDP && 1281 ip_ver == htons(ETH_P_IP)) 1282 csum_info |= L4_UDP; 1283 } else { 1284 csum_info = 0; 1285 } 1286 1287 tsb->l4_ptr_dest_map = csum_info; 1288 } 1289 1290 return skb; 1291 } 1292 1293 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, 1294 struct net_device *dev) 1295 { 1296 struct bcm_sysport_priv *priv = netdev_priv(dev); 1297 struct device *kdev = &priv->pdev->dev; 1298 struct bcm_sysport_tx_ring *ring; 1299 unsigned long flags, desc_flags; 1300 struct bcm_sysport_cb *cb; 1301 struct netdev_queue *txq; 1302 u32 len_status, addr_lo; 1303 unsigned int skb_len; 1304 dma_addr_t mapping; 1305 u16 queue; 1306 int ret; 1307 1308 queue = skb_get_queue_mapping(skb); 1309 txq = netdev_get_tx_queue(dev, queue); 1310 ring = &priv->tx_rings[queue]; 1311 1312 /* lock against tx reclaim in BH context and TX ring full interrupt */ 1313 spin_lock_irqsave(&ring->lock, flags); 1314 if (unlikely(ring->desc_count == 0)) { 1315 netif_tx_stop_queue(txq); 1316 netdev_err(dev, "queue %d awake and ring full!\n", queue); 1317 ret = NETDEV_TX_BUSY; 1318 goto out; 1319 } 1320 1321 /* Insert TSB and checksum infos */ 1322 if (priv->tsb_en) { 1323 skb = bcm_sysport_insert_tsb(skb, dev); 1324 if (!skb) { 1325 ret = NETDEV_TX_OK; 1326 goto out; 1327 } 1328 } 1329 1330 skb_len = skb->len; 1331 1332 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1333 if (dma_mapping_error(kdev, mapping)) { 1334 priv->mib.tx_dma_failed++; 1335 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", 1336 skb->data, skb_len); 1337 ret = NETDEV_TX_OK; 1338 dev_kfree_skb_any(skb); 1339 goto out; 1340 } 1341 1342 /* Remember the SKB for future freeing */ 1343 cb = &ring->cbs[ring->curr_desc]; 1344 cb->skb = skb; 1345 dma_unmap_addr_set(cb, dma_addr, mapping); 1346 dma_unmap_len_set(cb, dma_len, skb_len); 1347 1348 addr_lo = lower_32_bits(mapping); 1349 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; 1350 len_status |= (skb_len << DESC_LEN_SHIFT); 1351 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << 1352 DESC_STATUS_SHIFT; 1353 if (skb->ip_summed == CHECKSUM_PARTIAL) 1354 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); 1355 if (skb_vlan_tag_present(skb)) 1356 len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT); 1357 1358 ring->curr_desc++; 1359 if (ring->curr_desc == ring->size) 1360 ring->curr_desc = 0; 1361 ring->desc_count--; 1362 1363 /* Ports are latched, so write upper address first */ 1364 spin_lock_irqsave(&priv->desc_lock, desc_flags); 1365 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index)); 1366 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index)); 1367 spin_unlock_irqrestore(&priv->desc_lock, desc_flags); 1368 1369 /* Check ring space and update SW control flow */ 1370 if (ring->desc_count == 0) 1371 netif_tx_stop_queue(txq); 1372 1373 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", 1374 ring->index, ring->desc_count, ring->curr_desc); 1375 1376 ret = NETDEV_TX_OK; 1377 out: 1378 spin_unlock_irqrestore(&ring->lock, flags); 1379 return ret; 1380 } 1381 1382 static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue) 1383 { 1384 netdev_warn(dev, "transmit timeout!\n"); 1385 1386 netif_trans_update(dev); 1387 dev->stats.tx_errors++; 1388 1389 netif_tx_wake_all_queues(dev); 1390 } 1391 1392 /* phylib adjust link callback */ 1393 static void bcm_sysport_adj_link(struct net_device *dev) 1394 { 1395 struct bcm_sysport_priv *priv = netdev_priv(dev); 1396 struct phy_device *phydev = dev->phydev; 1397 unsigned int changed = 0; 1398 u32 cmd_bits = 0, reg; 1399 1400 if (priv->old_link != phydev->link) { 1401 changed = 1; 1402 priv->old_link = phydev->link; 1403 } 1404 1405 if (priv->old_duplex != phydev->duplex) { 1406 changed = 1; 1407 priv->old_duplex = phydev->duplex; 1408 } 1409 1410 if (priv->is_lite) 1411 goto out; 1412 1413 switch (phydev->speed) { 1414 case SPEED_2500: 1415 cmd_bits = CMD_SPEED_2500; 1416 break; 1417 case SPEED_1000: 1418 cmd_bits = CMD_SPEED_1000; 1419 break; 1420 case SPEED_100: 1421 cmd_bits = CMD_SPEED_100; 1422 break; 1423 case SPEED_10: 1424 cmd_bits = CMD_SPEED_10; 1425 break; 1426 default: 1427 break; 1428 } 1429 cmd_bits <<= CMD_SPEED_SHIFT; 1430 1431 if (phydev->duplex == DUPLEX_HALF) 1432 cmd_bits |= CMD_HD_EN; 1433 1434 if (priv->old_pause != phydev->pause) { 1435 changed = 1; 1436 priv->old_pause = phydev->pause; 1437 } 1438 1439 if (!phydev->pause) 1440 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; 1441 1442 if (!changed) 1443 return; 1444 1445 if (phydev->link) { 1446 reg = umac_readl(priv, UMAC_CMD); 1447 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | 1448 CMD_HD_EN | CMD_RX_PAUSE_IGNORE | 1449 CMD_TX_PAUSE_IGNORE); 1450 reg |= cmd_bits; 1451 umac_writel(priv, reg, UMAC_CMD); 1452 } 1453 out: 1454 if (changed) 1455 phy_print_status(phydev); 1456 } 1457 1458 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv, 1459 void (*cb)(struct work_struct *work)) 1460 { 1461 struct bcm_sysport_net_dim *dim = &priv->dim; 1462 1463 INIT_WORK(&dim->dim.work, cb); 1464 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1465 dim->event_ctr = 0; 1466 dim->packets = 0; 1467 dim->bytes = 0; 1468 } 1469 1470 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) 1471 { 1472 struct bcm_sysport_net_dim *dim = &priv->dim; 1473 struct dim_cq_moder moder; 1474 u32 usecs, pkts; 1475 1476 usecs = priv->rx_coalesce_usecs; 1477 pkts = priv->rx_max_coalesced_frames; 1478 1479 /* If DIM was enabled, re-apply default parameters */ 1480 if (dim->use_dim) { 1481 moder = net_dim_get_def_rx_moderation(dim->dim.mode); 1482 usecs = moder.usec; 1483 pkts = moder.pkts; 1484 } 1485 1486 bcm_sysport_set_rx_coalesce(priv, usecs, pkts); 1487 } 1488 1489 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, 1490 unsigned int index) 1491 { 1492 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1493 size_t size; 1494 u32 reg; 1495 1496 /* Simple descriptors partitioning for now */ 1497 size = 256; 1498 1499 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); 1500 if (!ring->cbs) { 1501 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1502 return -ENOMEM; 1503 } 1504 1505 /* Initialize SW view of the ring */ 1506 spin_lock_init(&ring->lock); 1507 ring->priv = priv; 1508 netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll); 1509 ring->index = index; 1510 ring->size = size; 1511 ring->clean_index = 0; 1512 ring->alloc_size = ring->size; 1513 ring->desc_count = ring->size; 1514 ring->curr_desc = 0; 1515 1516 /* Initialize HW ring */ 1517 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index)); 1518 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); 1519 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); 1520 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); 1521 1522 /* Configure QID and port mapping */ 1523 reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index)); 1524 reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT); 1525 if (ring->inspect) { 1526 reg |= ring->switch_queue & RING_QID_MASK; 1527 reg |= ring->switch_port << RING_PORT_ID_SHIFT; 1528 } else { 1529 reg |= RING_IGNORE_STATUS; 1530 } 1531 tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); 1532 reg = 0; 1533 /* Adjust the packet size calculations if SYSTEMPORT is responsible 1534 * for HW insertion of VLAN tags 1535 */ 1536 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 1537 reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT; 1538 tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index)); 1539 1540 /* Enable ACB algorithm 2 */ 1541 reg = tdma_readl(priv, TDMA_CONTROL); 1542 reg |= tdma_control_bit(priv, ACB_ALGO); 1543 tdma_writel(priv, reg, TDMA_CONTROL); 1544 1545 /* Do not use tdma_control_bit() here because TSB_SWAP1 collides 1546 * with the original definition of ACB_ALGO 1547 */ 1548 reg = tdma_readl(priv, TDMA_CONTROL); 1549 if (priv->is_lite) 1550 reg &= ~BIT(TSB_SWAP1); 1551 /* Set a correct TSB format based on host endian */ 1552 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 1553 reg |= tdma_control_bit(priv, TSB_SWAP0); 1554 else 1555 reg &= ~tdma_control_bit(priv, TSB_SWAP0); 1556 tdma_writel(priv, reg, TDMA_CONTROL); 1557 1558 /* Program the number of descriptors as MAX_THRESHOLD and half of 1559 * its size for the hysteresis trigger 1560 */ 1561 tdma_writel(priv, ring->size | 1562 1 << RING_HYST_THRESH_SHIFT, 1563 TDMA_DESC_RING_MAX_HYST(index)); 1564 1565 /* Enable the ring queue in the arbiter */ 1566 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN); 1567 reg |= (1 << index); 1568 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN); 1569 1570 napi_enable(&ring->napi); 1571 1572 netif_dbg(priv, hw, priv->netdev, 1573 "TDMA cfg, size=%d, switch q=%d,port=%d\n", 1574 ring->size, ring->switch_queue, 1575 ring->switch_port); 1576 1577 return 0; 1578 } 1579 1580 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, 1581 unsigned int index) 1582 { 1583 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; 1584 u32 reg; 1585 1586 /* Caller should stop the TDMA engine */ 1587 reg = tdma_readl(priv, TDMA_STATUS); 1588 if (!(reg & TDMA_DISABLED)) 1589 netdev_warn(priv->netdev, "TDMA not stopped!\n"); 1590 1591 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could 1592 * fail, so by checking this pointer we know whether the TX ring was 1593 * fully initialized or not. 1594 */ 1595 if (!ring->cbs) 1596 return; 1597 1598 napi_disable(&ring->napi); 1599 netif_napi_del(&ring->napi); 1600 1601 bcm_sysport_tx_clean(priv, ring); 1602 1603 kfree(ring->cbs); 1604 ring->cbs = NULL; 1605 ring->size = 0; 1606 ring->alloc_size = 0; 1607 1608 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); 1609 } 1610 1611 /* RDMA helper */ 1612 static inline int rdma_enable_set(struct bcm_sysport_priv *priv, 1613 unsigned int enable) 1614 { 1615 unsigned int timeout = 1000; 1616 u32 reg; 1617 1618 reg = rdma_readl(priv, RDMA_CONTROL); 1619 if (enable) 1620 reg |= RDMA_EN; 1621 else 1622 reg &= ~RDMA_EN; 1623 rdma_writel(priv, reg, RDMA_CONTROL); 1624 1625 /* Poll for RMDA disabling completion */ 1626 do { 1627 reg = rdma_readl(priv, RDMA_STATUS); 1628 if (!!(reg & RDMA_DISABLED) == !enable) 1629 return 0; 1630 usleep_range(1000, 2000); 1631 } while (timeout-- > 0); 1632 1633 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); 1634 1635 return -ETIMEDOUT; 1636 } 1637 1638 /* TDMA helper */ 1639 static inline int tdma_enable_set(struct bcm_sysport_priv *priv, 1640 unsigned int enable) 1641 { 1642 unsigned int timeout = 1000; 1643 u32 reg; 1644 1645 reg = tdma_readl(priv, TDMA_CONTROL); 1646 if (enable) 1647 reg |= tdma_control_bit(priv, TDMA_EN); 1648 else 1649 reg &= ~tdma_control_bit(priv, TDMA_EN); 1650 tdma_writel(priv, reg, TDMA_CONTROL); 1651 1652 /* Poll for TMDA disabling completion */ 1653 do { 1654 reg = tdma_readl(priv, TDMA_STATUS); 1655 if (!!(reg & TDMA_DISABLED) == !enable) 1656 return 0; 1657 1658 usleep_range(1000, 2000); 1659 } while (timeout-- > 0); 1660 1661 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n"); 1662 1663 return -ETIMEDOUT; 1664 } 1665 1666 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) 1667 { 1668 struct bcm_sysport_cb *cb; 1669 u32 reg; 1670 int ret; 1671 int i; 1672 1673 /* Initialize SW view of the RX ring */ 1674 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; 1675 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; 1676 priv->rx_c_index = 0; 1677 priv->rx_read_ptr = 0; 1678 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), 1679 GFP_KERNEL); 1680 if (!priv->rx_cbs) { 1681 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); 1682 return -ENOMEM; 1683 } 1684 1685 for (i = 0; i < priv->num_rx_bds; i++) { 1686 cb = priv->rx_cbs + i; 1687 cb->bd_addr = priv->rx_bds + i * DESC_SIZE; 1688 } 1689 1690 ret = bcm_sysport_alloc_rx_bufs(priv); 1691 if (ret) { 1692 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); 1693 return ret; 1694 } 1695 1696 /* Initialize HW, ensure RDMA is disabled */ 1697 reg = rdma_readl(priv, RDMA_STATUS); 1698 if (!(reg & RDMA_DISABLED)) 1699 rdma_enable_set(priv, 0); 1700 1701 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO); 1702 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI); 1703 rdma_writel(priv, 0, RDMA_PROD_INDEX); 1704 rdma_writel(priv, 0, RDMA_CONS_INDEX); 1705 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | 1706 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE); 1707 /* Operate the queue in ring mode */ 1708 rdma_writel(priv, 0, RDMA_START_ADDR_HI); 1709 rdma_writel(priv, 0, RDMA_START_ADDR_LO); 1710 rdma_writel(priv, 0, RDMA_END_ADDR_HI); 1711 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); 1712 1713 netif_dbg(priv, hw, priv->netdev, 1714 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", 1715 priv->num_rx_bds, priv->rx_bds); 1716 1717 return 0; 1718 } 1719 1720 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) 1721 { 1722 struct bcm_sysport_cb *cb; 1723 unsigned int i; 1724 u32 reg; 1725 1726 /* Caller should ensure RDMA is disabled */ 1727 reg = rdma_readl(priv, RDMA_STATUS); 1728 if (!(reg & RDMA_DISABLED)) 1729 netdev_warn(priv->netdev, "RDMA not stopped!\n"); 1730 1731 for (i = 0; i < priv->num_rx_bds; i++) { 1732 cb = &priv->rx_cbs[i]; 1733 if (dma_unmap_addr(cb, dma_addr)) 1734 dma_unmap_single(&priv->pdev->dev, 1735 dma_unmap_addr(cb, dma_addr), 1736 RX_BUF_LENGTH, DMA_FROM_DEVICE); 1737 bcm_sysport_free_cb(cb); 1738 } 1739 1740 kfree(priv->rx_cbs); 1741 priv->rx_cbs = NULL; 1742 1743 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); 1744 } 1745 1746 static void bcm_sysport_set_rx_mode(struct net_device *dev) 1747 { 1748 struct bcm_sysport_priv *priv = netdev_priv(dev); 1749 u32 reg; 1750 1751 if (priv->is_lite) 1752 return; 1753 1754 reg = umac_readl(priv, UMAC_CMD); 1755 if (dev->flags & IFF_PROMISC) 1756 reg |= CMD_PROMISC; 1757 else 1758 reg &= ~CMD_PROMISC; 1759 umac_writel(priv, reg, UMAC_CMD); 1760 1761 /* No support for ALLMULTI */ 1762 if (dev->flags & IFF_ALLMULTI) 1763 return; 1764 } 1765 1766 static inline void umac_enable_set(struct bcm_sysport_priv *priv, 1767 u32 mask, unsigned int enable) 1768 { 1769 u32 reg; 1770 1771 if (!priv->is_lite) { 1772 reg = umac_readl(priv, UMAC_CMD); 1773 if (enable) 1774 reg |= mask; 1775 else 1776 reg &= ~mask; 1777 umac_writel(priv, reg, UMAC_CMD); 1778 } else { 1779 reg = gib_readl(priv, GIB_CONTROL); 1780 if (enable) 1781 reg |= mask; 1782 else 1783 reg &= ~mask; 1784 gib_writel(priv, reg, GIB_CONTROL); 1785 } 1786 1787 /* UniMAC stops on a packet boundary, wait for a full-sized packet 1788 * to be processed (1 msec). 1789 */ 1790 if (enable == 0) 1791 usleep_range(1000, 2000); 1792 } 1793 1794 static inline void umac_reset(struct bcm_sysport_priv *priv) 1795 { 1796 u32 reg; 1797 1798 if (priv->is_lite) 1799 return; 1800 1801 reg = umac_readl(priv, UMAC_CMD); 1802 reg |= CMD_SW_RESET; 1803 umac_writel(priv, reg, UMAC_CMD); 1804 udelay(10); 1805 reg = umac_readl(priv, UMAC_CMD); 1806 reg &= ~CMD_SW_RESET; 1807 umac_writel(priv, reg, UMAC_CMD); 1808 } 1809 1810 static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1811 const unsigned char *addr) 1812 { 1813 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | 1814 addr[3]; 1815 u32 mac1 = (addr[4] << 8) | addr[5]; 1816 1817 if (!priv->is_lite) { 1818 umac_writel(priv, mac0, UMAC_MAC0); 1819 umac_writel(priv, mac1, UMAC_MAC1); 1820 } else { 1821 gib_writel(priv, mac0, GIB_MAC0); 1822 gib_writel(priv, mac1, GIB_MAC1); 1823 } 1824 } 1825 1826 static void topctrl_flush(struct bcm_sysport_priv *priv) 1827 { 1828 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 1829 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 1830 mdelay(1); 1831 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 1832 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 1833 } 1834 1835 static int bcm_sysport_change_mac(struct net_device *dev, void *p) 1836 { 1837 struct bcm_sysport_priv *priv = netdev_priv(dev); 1838 struct sockaddr *addr = p; 1839 1840 if (!is_valid_ether_addr(addr->sa_data)) 1841 return -EINVAL; 1842 1843 eth_hw_addr_set(dev, addr->sa_data); 1844 1845 /* interface is disabled, changes to MAC will be reflected on next 1846 * open call 1847 */ 1848 if (!netif_running(dev)) 1849 return 0; 1850 1851 umac_set_hw_addr(priv, dev->dev_addr); 1852 1853 return 0; 1854 } 1855 1856 static void bcm_sysport_get_stats64(struct net_device *dev, 1857 struct rtnl_link_stats64 *stats) 1858 { 1859 struct bcm_sysport_priv *priv = netdev_priv(dev); 1860 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 1861 unsigned int start; 1862 1863 netdev_stats_to_stats64(stats, &dev->stats); 1864 1865 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, 1866 &stats->tx_packets); 1867 1868 do { 1869 start = u64_stats_fetch_begin(&priv->syncp); 1870 stats->rx_packets = stats64->rx_packets; 1871 stats->rx_bytes = stats64->rx_bytes; 1872 } while (u64_stats_fetch_retry(&priv->syncp, start)); 1873 } 1874 1875 static void bcm_sysport_netif_start(struct net_device *dev) 1876 { 1877 struct bcm_sysport_priv *priv = netdev_priv(dev); 1878 1879 /* Enable NAPI */ 1880 bcm_sysport_init_dim(priv, bcm_sysport_dim_work); 1881 bcm_sysport_init_rx_coalesce(priv); 1882 napi_enable(&priv->napi); 1883 1884 /* Enable RX interrupt and TX ring full interrupt */ 1885 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 1886 1887 phy_start(dev->phydev); 1888 1889 /* Enable TX interrupts for the TXQs */ 1890 if (!priv->is_lite) 1891 intrl2_1_mask_clear(priv, 0xffffffff); 1892 else 1893 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); 1894 } 1895 1896 static void rbuf_init(struct bcm_sysport_priv *priv) 1897 { 1898 u32 reg; 1899 1900 reg = rbuf_readl(priv, RBUF_CONTROL); 1901 reg |= RBUF_4B_ALGN | RBUF_RSB_EN; 1902 /* Set a correct RSB format on SYSTEMPORT Lite */ 1903 if (priv->is_lite) 1904 reg &= ~RBUF_RSB_SWAP1; 1905 1906 /* Set a correct RSB format based on host endian */ 1907 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 1908 reg |= RBUF_RSB_SWAP0; 1909 else 1910 reg &= ~RBUF_RSB_SWAP0; 1911 rbuf_writel(priv, reg, RBUF_CONTROL); 1912 } 1913 1914 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) 1915 { 1916 intrl2_0_mask_set(priv, 0xffffffff); 1917 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1918 if (!priv->is_lite) { 1919 intrl2_1_mask_set(priv, 0xffffffff); 1920 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1921 } 1922 } 1923 1924 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) 1925 { 1926 u32 reg; 1927 1928 reg = gib_readl(priv, GIB_CONTROL); 1929 /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */ 1930 if (netdev_uses_dsa(priv->netdev)) { 1931 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); 1932 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; 1933 } 1934 reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT); 1935 reg |= 12 << GIB_IPG_LEN_SHIFT; 1936 gib_writel(priv, reg, GIB_CONTROL); 1937 } 1938 1939 static int bcm_sysport_open(struct net_device *dev) 1940 { 1941 struct bcm_sysport_priv *priv = netdev_priv(dev); 1942 struct phy_device *phydev; 1943 unsigned int i; 1944 int ret; 1945 1946 clk_prepare_enable(priv->clk); 1947 1948 /* Reset UniMAC */ 1949 umac_reset(priv); 1950 1951 /* Flush TX and RX FIFOs at TOPCTRL level */ 1952 topctrl_flush(priv); 1953 1954 /* Disable the UniMAC RX/TX */ 1955 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 1956 1957 /* Enable RBUF 2bytes alignment and Receive Status Block */ 1958 rbuf_init(priv); 1959 1960 /* Set maximum frame length */ 1961 if (!priv->is_lite) 1962 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 1963 else 1964 gib_set_pad_extension(priv); 1965 1966 /* Apply features again in case we changed them while interface was 1967 * down 1968 */ 1969 bcm_sysport_set_features(dev, dev->features); 1970 1971 /* Set MAC address */ 1972 umac_set_hw_addr(priv, dev->dev_addr); 1973 1974 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1975 0, priv->phy_interface); 1976 if (!phydev) { 1977 netdev_err(dev, "could not attach to PHY\n"); 1978 ret = -ENODEV; 1979 goto out_clk_disable; 1980 } 1981 1982 /* Indicate that the MAC is responsible for PHY PM */ 1983 phydev->mac_managed_pm = true; 1984 1985 /* Reset house keeping link status */ 1986 priv->old_duplex = -1; 1987 priv->old_link = -1; 1988 priv->old_pause = -1; 1989 1990 /* mask all interrupts and request them */ 1991 bcm_sysport_mask_all_intrs(priv); 1992 1993 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); 1994 if (ret) { 1995 netdev_err(dev, "failed to request RX interrupt\n"); 1996 goto out_phy_disconnect; 1997 } 1998 1999 if (!priv->is_lite) { 2000 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, 2001 dev->name, dev); 2002 if (ret) { 2003 netdev_err(dev, "failed to request TX interrupt\n"); 2004 goto out_free_irq0; 2005 } 2006 } 2007 2008 /* Initialize both hardware and software ring */ 2009 spin_lock_init(&priv->desc_lock); 2010 for (i = 0; i < dev->num_tx_queues; i++) { 2011 ret = bcm_sysport_init_tx_ring(priv, i); 2012 if (ret) { 2013 netdev_err(dev, "failed to initialize TX ring %d\n", 2014 i); 2015 goto out_free_tx_ring; 2016 } 2017 } 2018 2019 /* Initialize linked-list */ 2020 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 2021 2022 /* Initialize RX ring */ 2023 ret = bcm_sysport_init_rx_ring(priv); 2024 if (ret) { 2025 netdev_err(dev, "failed to initialize RX ring\n"); 2026 goto out_free_rx_ring; 2027 } 2028 2029 /* Turn on RDMA */ 2030 ret = rdma_enable_set(priv, 1); 2031 if (ret) 2032 goto out_free_rx_ring; 2033 2034 /* Turn on TDMA */ 2035 ret = tdma_enable_set(priv, 1); 2036 if (ret) 2037 goto out_clear_rx_int; 2038 2039 /* Turn on UniMAC TX/RX */ 2040 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); 2041 2042 bcm_sysport_netif_start(dev); 2043 2044 netif_tx_start_all_queues(dev); 2045 2046 return 0; 2047 2048 out_clear_rx_int: 2049 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); 2050 out_free_rx_ring: 2051 bcm_sysport_fini_rx_ring(priv); 2052 out_free_tx_ring: 2053 for (i = 0; i < dev->num_tx_queues; i++) 2054 bcm_sysport_fini_tx_ring(priv, i); 2055 if (!priv->is_lite) 2056 free_irq(priv->irq1, dev); 2057 out_free_irq0: 2058 free_irq(priv->irq0, dev); 2059 out_phy_disconnect: 2060 phy_disconnect(phydev); 2061 out_clk_disable: 2062 clk_disable_unprepare(priv->clk); 2063 return ret; 2064 } 2065 2066 static void bcm_sysport_netif_stop(struct net_device *dev) 2067 { 2068 struct bcm_sysport_priv *priv = netdev_priv(dev); 2069 2070 /* stop all software from updating hardware */ 2071 netif_tx_disable(dev); 2072 napi_disable(&priv->napi); 2073 cancel_work_sync(&priv->dim.dim.work); 2074 phy_stop(dev->phydev); 2075 2076 /* mask all interrupts */ 2077 bcm_sysport_mask_all_intrs(priv); 2078 } 2079 2080 static int bcm_sysport_stop(struct net_device *dev) 2081 { 2082 struct bcm_sysport_priv *priv = netdev_priv(dev); 2083 unsigned int i; 2084 int ret; 2085 2086 bcm_sysport_netif_stop(dev); 2087 2088 /* Disable UniMAC RX */ 2089 umac_enable_set(priv, CMD_RX_EN, 0); 2090 2091 ret = tdma_enable_set(priv, 0); 2092 if (ret) { 2093 netdev_err(dev, "timeout disabling RDMA\n"); 2094 return ret; 2095 } 2096 2097 /* Wait for a maximum packet size to be drained */ 2098 usleep_range(2000, 3000); 2099 2100 ret = rdma_enable_set(priv, 0); 2101 if (ret) { 2102 netdev_err(dev, "timeout disabling TDMA\n"); 2103 return ret; 2104 } 2105 2106 /* Disable UniMAC TX */ 2107 umac_enable_set(priv, CMD_TX_EN, 0); 2108 2109 /* Free RX/TX rings SW structures */ 2110 for (i = 0; i < dev->num_tx_queues; i++) 2111 bcm_sysport_fini_tx_ring(priv, i); 2112 bcm_sysport_fini_rx_ring(priv); 2113 2114 free_irq(priv->irq0, dev); 2115 if (!priv->is_lite) 2116 free_irq(priv->irq1, dev); 2117 2118 /* Disconnect from PHY */ 2119 phy_disconnect(dev->phydev); 2120 2121 clk_disable_unprepare(priv->clk); 2122 2123 return 0; 2124 } 2125 2126 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv, 2127 u64 location) 2128 { 2129 unsigned int index; 2130 u32 reg; 2131 2132 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2133 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); 2134 reg >>= RXCHK_BRCM_TAG_CID_SHIFT; 2135 reg &= RXCHK_BRCM_TAG_CID_MASK; 2136 if (reg == location) 2137 return index; 2138 } 2139 2140 return -EINVAL; 2141 } 2142 2143 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv, 2144 struct ethtool_rxnfc *nfc) 2145 { 2146 int index; 2147 2148 /* This is not a rule that we know about */ 2149 index = bcm_sysport_rule_find(priv, nfc->fs.location); 2150 if (index < 0) 2151 return -EOPNOTSUPP; 2152 2153 nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE; 2154 2155 return 0; 2156 } 2157 2158 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, 2159 struct ethtool_rxnfc *nfc) 2160 { 2161 unsigned int index; 2162 u32 reg; 2163 2164 /* We cannot match locations greater than what the classification ID 2165 * permits (256 entries) 2166 */ 2167 if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK) 2168 return -E2BIG; 2169 2170 /* We cannot support flows that are not destined for a wake-up */ 2171 if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE) 2172 return -EOPNOTSUPP; 2173 2174 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); 2175 if (index >= RXCHK_BRCM_TAG_MAX) 2176 /* All filters are already in use, we cannot match more rules */ 2177 return -ENOSPC; 2178 2179 /* Location is the classification ID, and index is the position 2180 * within one of our 8 possible filters to be programmed 2181 */ 2182 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); 2183 reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT); 2184 reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT; 2185 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index)); 2186 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); 2187 2188 priv->filters_loc[index] = nfc->fs.location; 2189 set_bit(index, priv->filters); 2190 2191 return 0; 2192 } 2193 2194 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, 2195 u64 location) 2196 { 2197 int index; 2198 2199 /* This is not a rule that we know about */ 2200 index = bcm_sysport_rule_find(priv, location); 2201 if (index < 0) 2202 return -EOPNOTSUPP; 2203 2204 /* No need to disable this filter if it was enabled, this will 2205 * be taken care of during suspend time by bcm_sysport_suspend_to_wol 2206 */ 2207 clear_bit(index, priv->filters); 2208 priv->filters_loc[index] = 0; 2209 2210 return 0; 2211 } 2212 2213 static int bcm_sysport_get_rxnfc(struct net_device *dev, 2214 struct ethtool_rxnfc *nfc, u32 *rule_locs) 2215 { 2216 struct bcm_sysport_priv *priv = netdev_priv(dev); 2217 int ret = -EOPNOTSUPP; 2218 2219 switch (nfc->cmd) { 2220 case ETHTOOL_GRXCLSRULE: 2221 ret = bcm_sysport_rule_get(priv, nfc); 2222 break; 2223 default: 2224 break; 2225 } 2226 2227 return ret; 2228 } 2229 2230 static int bcm_sysport_set_rxnfc(struct net_device *dev, 2231 struct ethtool_rxnfc *nfc) 2232 { 2233 struct bcm_sysport_priv *priv = netdev_priv(dev); 2234 int ret = -EOPNOTSUPP; 2235 2236 switch (nfc->cmd) { 2237 case ETHTOOL_SRXCLSRLINS: 2238 ret = bcm_sysport_rule_set(priv, nfc); 2239 break; 2240 case ETHTOOL_SRXCLSRLDEL: 2241 ret = bcm_sysport_rule_del(priv, nfc->fs.location); 2242 break; 2243 default: 2244 break; 2245 } 2246 2247 return ret; 2248 } 2249 2250 static const struct ethtool_ops bcm_sysport_ethtool_ops = { 2251 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 2252 ETHTOOL_COALESCE_MAX_FRAMES | 2253 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 2254 .get_drvinfo = bcm_sysport_get_drvinfo, 2255 .get_msglevel = bcm_sysport_get_msglvl, 2256 .set_msglevel = bcm_sysport_set_msglvl, 2257 .get_link = ethtool_op_get_link, 2258 .get_strings = bcm_sysport_get_strings, 2259 .get_ethtool_stats = bcm_sysport_get_stats, 2260 .get_sset_count = bcm_sysport_get_sset_count, 2261 .get_wol = bcm_sysport_get_wol, 2262 .set_wol = bcm_sysport_set_wol, 2263 .get_coalesce = bcm_sysport_get_coalesce, 2264 .set_coalesce = bcm_sysport_set_coalesce, 2265 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2266 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2267 .get_rxnfc = bcm_sysport_get_rxnfc, 2268 .set_rxnfc = bcm_sysport_set_rxnfc, 2269 }; 2270 2271 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, 2272 struct net_device *sb_dev) 2273 { 2274 struct bcm_sysport_priv *priv = netdev_priv(dev); 2275 u16 queue = skb_get_queue_mapping(skb); 2276 struct bcm_sysport_tx_ring *tx_ring; 2277 unsigned int q, port; 2278 2279 if (!netdev_uses_dsa(dev)) 2280 return netdev_pick_tx(dev, skb, NULL); 2281 2282 /* DSA tagging layer will have configured the correct queue */ 2283 q = BRCM_TAG_GET_QUEUE(queue); 2284 port = BRCM_TAG_GET_PORT(queue); 2285 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; 2286 2287 if (unlikely(!tx_ring)) 2288 return netdev_pick_tx(dev, skb, NULL); 2289 2290 return tx_ring->index; 2291 } 2292 2293 static const struct net_device_ops bcm_sysport_netdev_ops = { 2294 .ndo_start_xmit = bcm_sysport_xmit, 2295 .ndo_tx_timeout = bcm_sysport_tx_timeout, 2296 .ndo_open = bcm_sysport_open, 2297 .ndo_stop = bcm_sysport_stop, 2298 .ndo_set_features = bcm_sysport_set_features, 2299 .ndo_set_rx_mode = bcm_sysport_set_rx_mode, 2300 .ndo_set_mac_address = bcm_sysport_change_mac, 2301 #ifdef CONFIG_NET_POLL_CONTROLLER 2302 .ndo_poll_controller = bcm_sysport_poll_controller, 2303 #endif 2304 .ndo_get_stats64 = bcm_sysport_get_stats64, 2305 .ndo_select_queue = bcm_sysport_select_queue, 2306 }; 2307 2308 static int bcm_sysport_map_queues(struct net_device *dev, 2309 struct net_device *slave_dev) 2310 { 2311 struct dsa_port *dp = dsa_port_from_netdev(slave_dev); 2312 struct bcm_sysport_priv *priv = netdev_priv(dev); 2313 struct bcm_sysport_tx_ring *ring; 2314 unsigned int num_tx_queues; 2315 unsigned int q, qp, port; 2316 2317 /* We can't be setting up queue inspection for non directly attached 2318 * switches 2319 */ 2320 if (dp->ds->index) 2321 return 0; 2322 2323 port = dp->index; 2324 2325 /* On SYSTEMPORT Lite we have twice as less queues, so we cannot do a 2326 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of 2327 * per-port (slave_dev) network devices queue, we achieve just that. 2328 * This need to happen now before any slave network device is used such 2329 * it accurately reflects the number of real TX queues. 2330 */ 2331 if (priv->is_lite) 2332 netif_set_real_num_tx_queues(slave_dev, 2333 slave_dev->num_tx_queues / 2); 2334 2335 num_tx_queues = slave_dev->real_num_tx_queues; 2336 2337 if (priv->per_port_num_tx_queues && 2338 priv->per_port_num_tx_queues != num_tx_queues) 2339 netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); 2340 2341 priv->per_port_num_tx_queues = num_tx_queues; 2342 2343 for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; 2344 q++) { 2345 ring = &priv->tx_rings[q]; 2346 2347 if (ring->inspect) 2348 continue; 2349 2350 /* Just remember the mapping actual programming done 2351 * during bcm_sysport_init_tx_ring 2352 */ 2353 ring->switch_queue = qp; 2354 ring->switch_port = port; 2355 ring->inspect = true; 2356 priv->ring_map[qp + port * num_tx_queues] = ring; 2357 qp++; 2358 } 2359 2360 return 0; 2361 } 2362 2363 static int bcm_sysport_unmap_queues(struct net_device *dev, 2364 struct net_device *slave_dev) 2365 { 2366 struct dsa_port *dp = dsa_port_from_netdev(slave_dev); 2367 struct bcm_sysport_priv *priv = netdev_priv(dev); 2368 struct bcm_sysport_tx_ring *ring; 2369 unsigned int num_tx_queues; 2370 unsigned int q, qp, port; 2371 2372 port = dp->index; 2373 2374 num_tx_queues = slave_dev->real_num_tx_queues; 2375 2376 for (q = 0; q < dev->num_tx_queues; q++) { 2377 ring = &priv->tx_rings[q]; 2378 2379 if (ring->switch_port != port) 2380 continue; 2381 2382 if (!ring->inspect) 2383 continue; 2384 2385 ring->inspect = false; 2386 qp = ring->switch_queue; 2387 priv->ring_map[qp + port * num_tx_queues] = NULL; 2388 } 2389 2390 return 0; 2391 } 2392 2393 static int bcm_sysport_netdevice_event(struct notifier_block *nb, 2394 unsigned long event, void *ptr) 2395 { 2396 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2397 struct netdev_notifier_changeupper_info *info = ptr; 2398 struct bcm_sysport_priv *priv; 2399 int ret = 0; 2400 2401 priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier); 2402 if (priv->netdev != dev) 2403 return NOTIFY_DONE; 2404 2405 switch (event) { 2406 case NETDEV_CHANGEUPPER: 2407 if (dev->netdev_ops != &bcm_sysport_netdev_ops) 2408 return NOTIFY_DONE; 2409 2410 if (!dsa_user_dev_check(info->upper_dev)) 2411 return NOTIFY_DONE; 2412 2413 if (info->linking) 2414 ret = bcm_sysport_map_queues(dev, info->upper_dev); 2415 else 2416 ret = bcm_sysport_unmap_queues(dev, info->upper_dev); 2417 break; 2418 } 2419 2420 return notifier_from_errno(ret); 2421 } 2422 2423 #define REV_FMT "v%2x.%02x" 2424 2425 static const struct bcm_sysport_hw_params bcm_sysport_params[] = { 2426 [SYSTEMPORT] = { 2427 .is_lite = false, 2428 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, 2429 }, 2430 [SYSTEMPORT_LITE] = { 2431 .is_lite = true, 2432 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, 2433 }, 2434 }; 2435 2436 static const struct of_device_id bcm_sysport_of_match[] = { 2437 { .compatible = "brcm,systemportlite-v1.00", 2438 .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, 2439 { .compatible = "brcm,systemport-v1.00", 2440 .data = &bcm_sysport_params[SYSTEMPORT] }, 2441 { .compatible = "brcm,systemport", 2442 .data = &bcm_sysport_params[SYSTEMPORT] }, 2443 { /* sentinel */ } 2444 }; 2445 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); 2446 2447 static int bcm_sysport_probe(struct platform_device *pdev) 2448 { 2449 const struct bcm_sysport_hw_params *params; 2450 const struct of_device_id *of_id = NULL; 2451 struct bcm_sysport_priv *priv; 2452 struct device_node *dn; 2453 struct net_device *dev; 2454 u32 txq, rxq; 2455 int ret; 2456 2457 dn = pdev->dev.of_node; 2458 of_id = of_match_node(bcm_sysport_of_match, dn); 2459 if (!of_id || !of_id->data) 2460 return -EINVAL; 2461 2462 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 2463 if (ret) 2464 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2465 if (ret) { 2466 dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret); 2467 return ret; 2468 } 2469 2470 /* Fairly quickly we need to know the type of adapter we have */ 2471 params = of_id->data; 2472 2473 /* Read the Transmit/Receive Queue properties */ 2474 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) 2475 txq = TDMA_NUM_RINGS; 2476 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) 2477 rxq = 1; 2478 2479 /* Sanity check the number of transmit queues */ 2480 if (!txq || txq > TDMA_NUM_RINGS) 2481 return -EINVAL; 2482 2483 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); 2484 if (!dev) 2485 return -ENOMEM; 2486 2487 /* Initialize private members */ 2488 priv = netdev_priv(dev); 2489 2490 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport"); 2491 if (IS_ERR(priv->clk)) { 2492 ret = PTR_ERR(priv->clk); 2493 goto err_free_netdev; 2494 } 2495 2496 /* Allocate number of TX rings */ 2497 priv->tx_rings = devm_kcalloc(&pdev->dev, txq, 2498 sizeof(struct bcm_sysport_tx_ring), 2499 GFP_KERNEL); 2500 if (!priv->tx_rings) { 2501 ret = -ENOMEM; 2502 goto err_free_netdev; 2503 } 2504 2505 priv->is_lite = params->is_lite; 2506 priv->num_rx_desc_words = params->num_rx_desc_words; 2507 2508 priv->irq0 = platform_get_irq(pdev, 0); 2509 if (!priv->is_lite) { 2510 priv->irq1 = platform_get_irq(pdev, 1); 2511 priv->wol_irq = platform_get_irq_optional(pdev, 2); 2512 } else { 2513 priv->wol_irq = platform_get_irq_optional(pdev, 1); 2514 } 2515 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2516 ret = -EINVAL; 2517 goto err_free_netdev; 2518 } 2519 2520 priv->base = devm_platform_ioremap_resource(pdev, 0); 2521 if (IS_ERR(priv->base)) { 2522 ret = PTR_ERR(priv->base); 2523 goto err_free_netdev; 2524 } 2525 2526 priv->netdev = dev; 2527 priv->pdev = pdev; 2528 2529 ret = of_get_phy_mode(dn, &priv->phy_interface); 2530 /* Default to GMII interface mode */ 2531 if (ret) 2532 priv->phy_interface = PHY_INTERFACE_MODE_GMII; 2533 2534 /* In the case of a fixed PHY, the DT node associated 2535 * to the PHY is the Ethernet MAC DT node. 2536 */ 2537 if (of_phy_is_fixed_link(dn)) { 2538 ret = of_phy_register_fixed_link(dn); 2539 if (ret) { 2540 dev_err(&pdev->dev, "failed to register fixed PHY\n"); 2541 goto err_free_netdev; 2542 } 2543 2544 priv->phy_dn = dn; 2545 } 2546 2547 /* Initialize netdevice members */ 2548 ret = of_get_ethdev_address(dn, dev); 2549 if (ret) { 2550 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 2551 eth_hw_addr_random(dev); 2552 } 2553 2554 SET_NETDEV_DEV(dev, &pdev->dev); 2555 dev_set_drvdata(&pdev->dev, dev); 2556 dev->ethtool_ops = &bcm_sysport_ethtool_ops; 2557 dev->netdev_ops = &bcm_sysport_netdev_ops; 2558 netif_napi_add(dev, &priv->napi, bcm_sysport_poll); 2559 2560 dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | 2561 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2562 NETIF_F_HW_VLAN_CTAG_TX; 2563 dev->hw_features |= dev->features; 2564 dev->vlan_features |= dev->features; 2565 dev->max_mtu = UMAC_MAX_MTU_SIZE; 2566 2567 /* Request the WOL interrupt and advertise suspend if available */ 2568 priv->wol_irq_disabled = 1; 2569 ret = devm_request_irq(&pdev->dev, priv->wol_irq, 2570 bcm_sysport_wol_isr, 0, dev->name, priv); 2571 if (!ret) 2572 device_set_wakeup_capable(&pdev->dev, 1); 2573 2574 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol"); 2575 if (IS_ERR(priv->wol_clk)) { 2576 ret = PTR_ERR(priv->wol_clk); 2577 goto err_deregister_fixed_link; 2578 } 2579 2580 /* Set the needed headroom once and for all */ 2581 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 2582 dev->needed_headroom += sizeof(struct bcm_tsb); 2583 2584 /* libphy will adjust the link state accordingly */ 2585 netif_carrier_off(dev); 2586 2587 priv->rx_max_coalesced_frames = 1; 2588 u64_stats_init(&priv->syncp); 2589 2590 priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event; 2591 2592 ret = register_netdevice_notifier(&priv->netdev_notifier); 2593 if (ret) { 2594 dev_err(&pdev->dev, "failed to register DSA notifier\n"); 2595 goto err_deregister_fixed_link; 2596 } 2597 2598 ret = register_netdev(dev); 2599 if (ret) { 2600 dev_err(&pdev->dev, "failed to register net_device\n"); 2601 goto err_deregister_notifier; 2602 } 2603 2604 clk_prepare_enable(priv->clk); 2605 2606 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; 2607 dev_info(&pdev->dev, 2608 "Broadcom SYSTEMPORT%s " REV_FMT 2609 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", 2610 priv->is_lite ? " Lite" : "", 2611 (priv->rev >> 8) & 0xff, priv->rev & 0xff, 2612 priv->irq0, priv->irq1, txq, rxq); 2613 2614 clk_disable_unprepare(priv->clk); 2615 2616 return 0; 2617 2618 err_deregister_notifier: 2619 unregister_netdevice_notifier(&priv->netdev_notifier); 2620 err_deregister_fixed_link: 2621 if (of_phy_is_fixed_link(dn)) 2622 of_phy_deregister_fixed_link(dn); 2623 err_free_netdev: 2624 free_netdev(dev); 2625 return ret; 2626 } 2627 2628 static void bcm_sysport_remove(struct platform_device *pdev) 2629 { 2630 struct net_device *dev = dev_get_drvdata(&pdev->dev); 2631 struct bcm_sysport_priv *priv = netdev_priv(dev); 2632 struct device_node *dn = pdev->dev.of_node; 2633 2634 /* Not much to do, ndo_close has been called 2635 * and we use managed allocations 2636 */ 2637 unregister_netdevice_notifier(&priv->netdev_notifier); 2638 unregister_netdev(dev); 2639 if (of_phy_is_fixed_link(dn)) 2640 of_phy_deregister_fixed_link(dn); 2641 free_netdev(dev); 2642 dev_set_drvdata(&pdev->dev, NULL); 2643 } 2644 2645 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) 2646 { 2647 struct net_device *ndev = priv->netdev; 2648 unsigned int timeout = 1000; 2649 unsigned int index, i = 0; 2650 u32 reg; 2651 2652 reg = umac_readl(priv, UMAC_MPD_CTRL); 2653 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2654 reg |= MPD_EN; 2655 reg &= ~PSW_EN; 2656 if (priv->wolopts & WAKE_MAGICSECURE) { 2657 /* Program the SecureOn password */ 2658 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), 2659 UMAC_PSW_MS); 2660 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), 2661 UMAC_PSW_LS); 2662 reg |= PSW_EN; 2663 } 2664 umac_writel(priv, reg, UMAC_MPD_CTRL); 2665 2666 if (priv->wolopts & WAKE_FILTER) { 2667 /* Turn on ACPI matching to steal packets from RBUF */ 2668 reg = rbuf_readl(priv, RBUF_CONTROL); 2669 if (priv->is_lite) 2670 reg |= RBUF_ACPI_EN_LITE; 2671 else 2672 reg |= RBUF_ACPI_EN; 2673 rbuf_writel(priv, reg, RBUF_CONTROL); 2674 2675 /* Enable RXCHK, active filters and Broadcom tag matching */ 2676 reg = rxchk_readl(priv, RXCHK_CONTROL); 2677 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 2678 RXCHK_BRCM_TAG_MATCH_SHIFT); 2679 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2680 reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i); 2681 i++; 2682 } 2683 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN; 2684 rxchk_writel(priv, reg, RXCHK_CONTROL); 2685 } 2686 2687 /* Make sure RBUF entered WoL mode as result */ 2688 do { 2689 reg = rbuf_readl(priv, RBUF_STATUS); 2690 if (reg & RBUF_WOL_MODE) 2691 break; 2692 2693 udelay(10); 2694 } while (timeout-- > 0); 2695 2696 /* Do not leave the UniMAC RBUF matching only MPD packets */ 2697 if (!timeout) { 2698 mpd_enable_set(priv, false); 2699 netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); 2700 return -ETIMEDOUT; 2701 } 2702 2703 /* UniMAC receive needs to be turned on */ 2704 umac_enable_set(priv, CMD_RX_EN, 1); 2705 2706 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2707 2708 return 0; 2709 } 2710 2711 static int __maybe_unused bcm_sysport_suspend(struct device *d) 2712 { 2713 struct net_device *dev = dev_get_drvdata(d); 2714 struct bcm_sysport_priv *priv = netdev_priv(dev); 2715 unsigned int i; 2716 int ret = 0; 2717 u32 reg; 2718 2719 if (!netif_running(dev)) 2720 return 0; 2721 2722 netif_device_detach(dev); 2723 2724 bcm_sysport_netif_stop(dev); 2725 2726 phy_suspend(dev->phydev); 2727 2728 /* Disable UniMAC RX */ 2729 umac_enable_set(priv, CMD_RX_EN, 0); 2730 2731 ret = rdma_enable_set(priv, 0); 2732 if (ret) { 2733 netdev_err(dev, "RDMA timeout!\n"); 2734 return ret; 2735 } 2736 2737 /* Disable RXCHK if enabled */ 2738 if (priv->rx_chk_en) { 2739 reg = rxchk_readl(priv, RXCHK_CONTROL); 2740 reg &= ~RXCHK_EN; 2741 rxchk_writel(priv, reg, RXCHK_CONTROL); 2742 } 2743 2744 /* Flush RX pipe */ 2745 if (!priv->wolopts) 2746 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 2747 2748 ret = tdma_enable_set(priv, 0); 2749 if (ret) { 2750 netdev_err(dev, "TDMA timeout!\n"); 2751 return ret; 2752 } 2753 2754 /* Wait for a packet boundary */ 2755 usleep_range(2000, 3000); 2756 2757 umac_enable_set(priv, CMD_TX_EN, 0); 2758 2759 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 2760 2761 /* Free RX/TX rings SW structures */ 2762 for (i = 0; i < dev->num_tx_queues; i++) 2763 bcm_sysport_fini_tx_ring(priv, i); 2764 bcm_sysport_fini_rx_ring(priv); 2765 2766 /* Get prepared for Wake-on-LAN */ 2767 if (device_may_wakeup(d) && priv->wolopts) { 2768 clk_prepare_enable(priv->wol_clk); 2769 ret = bcm_sysport_suspend_to_wol(priv); 2770 } 2771 2772 clk_disable_unprepare(priv->clk); 2773 2774 return ret; 2775 } 2776 2777 static int __maybe_unused bcm_sysport_resume(struct device *d) 2778 { 2779 struct net_device *dev = dev_get_drvdata(d); 2780 struct bcm_sysport_priv *priv = netdev_priv(dev); 2781 unsigned int i; 2782 int ret; 2783 2784 if (!netif_running(dev)) 2785 return 0; 2786 2787 clk_prepare_enable(priv->clk); 2788 if (priv->wolopts) 2789 clk_disable_unprepare(priv->wol_clk); 2790 2791 umac_reset(priv); 2792 2793 /* Disable the UniMAC RX/TX */ 2794 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 2795 2796 /* We may have been suspended and never received a WOL event that 2797 * would turn off MPD detection, take care of that now 2798 */ 2799 bcm_sysport_resume_from_wol(priv); 2800 2801 /* Initialize both hardware and software ring */ 2802 for (i = 0; i < dev->num_tx_queues; i++) { 2803 ret = bcm_sysport_init_tx_ring(priv, i); 2804 if (ret) { 2805 netdev_err(dev, "failed to initialize TX ring %d\n", 2806 i); 2807 goto out_free_tx_rings; 2808 } 2809 } 2810 2811 /* Initialize linked-list */ 2812 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 2813 2814 /* Initialize RX ring */ 2815 ret = bcm_sysport_init_rx_ring(priv); 2816 if (ret) { 2817 netdev_err(dev, "failed to initialize RX ring\n"); 2818 goto out_free_rx_ring; 2819 } 2820 2821 /* RX pipe enable */ 2822 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 2823 2824 ret = rdma_enable_set(priv, 1); 2825 if (ret) { 2826 netdev_err(dev, "failed to enable RDMA\n"); 2827 goto out_free_rx_ring; 2828 } 2829 2830 /* Restore enabled features */ 2831 bcm_sysport_set_features(dev, dev->features); 2832 2833 rbuf_init(priv); 2834 2835 /* Set maximum frame length */ 2836 if (!priv->is_lite) 2837 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 2838 else 2839 gib_set_pad_extension(priv); 2840 2841 /* Set MAC address */ 2842 umac_set_hw_addr(priv, dev->dev_addr); 2843 2844 umac_enable_set(priv, CMD_RX_EN, 1); 2845 2846 /* TX pipe enable */ 2847 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 2848 2849 umac_enable_set(priv, CMD_TX_EN, 1); 2850 2851 ret = tdma_enable_set(priv, 1); 2852 if (ret) { 2853 netdev_err(dev, "TDMA timeout!\n"); 2854 goto out_free_rx_ring; 2855 } 2856 2857 phy_resume(dev->phydev); 2858 2859 bcm_sysport_netif_start(dev); 2860 2861 netif_device_attach(dev); 2862 2863 return 0; 2864 2865 out_free_rx_ring: 2866 bcm_sysport_fini_rx_ring(priv); 2867 out_free_tx_rings: 2868 for (i = 0; i < dev->num_tx_queues; i++) 2869 bcm_sysport_fini_tx_ring(priv, i); 2870 clk_disable_unprepare(priv->clk); 2871 return ret; 2872 } 2873 2874 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, 2875 bcm_sysport_suspend, bcm_sysport_resume); 2876 2877 static struct platform_driver bcm_sysport_driver = { 2878 .probe = bcm_sysport_probe, 2879 .remove = bcm_sysport_remove, 2880 .driver = { 2881 .name = "brcm-systemport", 2882 .of_match_table = bcm_sysport_of_match, 2883 .pm = &bcm_sysport_pm_ops, 2884 }, 2885 }; 2886 module_platform_driver(bcm_sysport_driver); 2887 2888 MODULE_AUTHOR("Broadcom Corporation"); 2889 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver"); 2890 MODULE_ALIAS("platform:brcm-systemport"); 2891 MODULE_LICENSE("GPL"); 2892