// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 NovaTech LLC
 * George McCollister <george.mccollister@gmail.com>
 */

#include <net/dsa.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"

/* Poll period for accumulating the hardware MIB counters into software. */
#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)

#define XRS7000X_SUPPORTED_HSR_FEATURES \
	(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
	 NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)

/* Device ID values read back from XRS_DEV_ID0 for each supported variant. */
#define XRS7003E_ID	0x100
#define XRS7003F_ID	0x101
#define XRS7004E_ID	0x200
#define XRS7004F_ID	0x201

/* Per-variant match data: {device id, name, number of ports}. */
const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
EXPORT_SYMBOL(xrs7003e_info);

const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
EXPORT_SYMBOL(xrs7003f_info);

const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
EXPORT_SYMBOL(xrs7004e_info);

const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
EXPORT_SYMBOL(xrs7004f_info);

/* Pairs a reg_field description with the regmap_field pointer it fills in,
 * so xrs700x_setup_regmap_range() can allocate them in a loop.
 */
struct xrs700x_regfield {
	struct reg_field rf;
	struct regmap_field **rmf;
};

/* One hardware MIB counter: register offset, ethtool name, and (optionally)
 * the offset of the rtnl_link_stats64 member it contributes to (-1 if the
 * counter is exposed via ethtool only).
 */
struct xrs700x_mib {
	unsigned int offset;
	const char *name;
	int stats64_offset;
};

#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}

static const struct xrs700x_mib xrs700x_mibs[] = {
	XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
	XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
	XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
	XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
	XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
	XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
	XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
	XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
	XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
	XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
	XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
	XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
	XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
	XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
	XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
	XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};

/* HSR/PRP supervision frame multicast destination base 01:15:4e:00:01:00;
 * only the first 40 bits are matched (last octet is the node suffix).
 */
static const u8 eth_hsrsup_addr[ETH_ALEN] = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};

/* ethtool -S: emit one string per entry of xrs700x_mibs, in table order. */
static void xrs700x_get_strings(struct dsa_switch *ds, int port,
				u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

/* ethtool: number of statistics strings/values exposed per port. */
static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(xrs700x_mibs);
}

/* Latch and accumulate one port's hardware MIB counters.
 *
 * The hardware counters are only 32 bits wide (read as two 16-bit halves),
 * so they are periodically folded into the 64-bit software accumulators in
 * p->mib_data[] under p->mib_mutex, and a fresh rtnl_link_stats64 snapshot
 * is published under the u64_stats syncp for xrs700x_get_stats64().
 */
static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];
	struct rtnl_link_stats64 stats;
	int i;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&p->mib_mutex);

	/* Capture counter values */
	regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		unsigned int high = 0, low = 0, reg;

		/* Low half at the listed offset, high half 2 bytes above. */
		reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
		regmap_read(priv->regmap, reg, &low);
		regmap_read(priv->regmap, reg + 2, &high);

		p->mib_data[i] += (high << 16) | low;

		if (xrs700x_mibs[i].stats64_offset >= 0) {
			u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
			*(u64 *)s += p->mib_data[i];
		}
	}

	/* multicast must be added to rx_packets (which already includes
	 * unicast and broadcast)
	 */
	stats.rx_packets += stats.multicast;

	u64_stats_update_begin(&p->syncp);
	p->stats64 = stats;
	u64_stats_update_end(&p->syncp);

	mutex_unlock(&p->mib_mutex);
}

/* Periodic worker: poll every port's counters, then re-arm. */
static void xrs700x_mib_work(struct work_struct *work)
{
	struct xrs700x *priv = container_of(work, struct xrs700x,
					    mib_work.work);
	int i;

	for (i = 0; i < priv->ds->num_ports; i++)
		xrs700x_read_port_counters(priv, i);

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
}

/* ethtool -S: refresh the accumulators, then copy them out under the lock. */
static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
				      u64 *data)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];

	xrs700x_read_port_counters(priv, port);

	mutex_lock(&p->mib_mutex);
	memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
	mutex_unlock(&p->mib_mutex);
}

/* Lockless read of the last published stats snapshot (seqcount retry loop). */
static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
				struct rtnl_link_stats64 *s)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*s = p->stats64;
	} while (u64_stats_fetch_retry(&p->syncp, start));
}

/* Allocate the per-port regmap fields covering the XRS_PORT_STATE register:
 * forward state (bits 0-1), management mode (2-3), selected speed (4-9) and
 * current speed (10-11), replicated across ports at XRS_PORT_OFFSET stride.
 */
static int xrs700x_setup_regmap_range(struct xrs700x *priv)
{
	struct xrs700x_regfield regfields[] = {
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_forward
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_management
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_sel_speed
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_cur_speed
		}
	};
	int i = 0;

	for (; i < ARRAY_SIZE(regfields); i++) {
		*regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
							    priv->regmap,
							    regfields[i].rf);
		if (IS_ERR(*regfields[i].rmf))
			return PTR_ERR(*regfields[i].rmf);
	}

	return 0;
}

static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_XRS700X;
}

/* Soft-reset the switch core and poll until the reset bit self-clears. */
static int xrs700x_reset(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;
	int ret;

	ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
	if (ret)
		goto error;

	ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
				       val, !(val & XRS_GENERAL_RESET),
				       10, 1000);
error:
	if (ret) {
		dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
				    ret);
	}

	return ret;
}

/* Map bridge STP states onto the hardware port forward-state field and
 * gate the BPDU-to-CPU inbound policy accordingly.
 */
static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct xrs700x *priv = ds->priv;
	unsigned int bpdus = 1;
	unsigned int val;

	switch (state) {
	case BR_STATE_DISABLED:
		bpdus = 0;
		fallthrough;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		val = XRS_PORT_DISABLED;
		break;
	case BR_STATE_LEARNING:
		val = XRS_PORT_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		val = XRS_PORT_FORWARDING;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	regmap_fields_write(priv->ps_forward, port, val);

	/* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
	 * which allows BPDU forwarding to the CPU port when the front facing
	 * port is in disabled/learning state.
	 */
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);

	dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
			    __func__, port, state, val);
}

/* Add an inbound policy filter which matches the BPDU destination MAC
 * and forwards to the CPU port. Leave the policy disabled, it will be
 * enabled as needed.
 */
static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare all 48 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
	if (ret)
		return ret;

	/* match BPDU destination 01:80:c2:00:00:00 */
	for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
				   eth_stp_addr[i] |
				   (eth_stp_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror BPDU to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
	if (ret)
		return ret;

	/* Allow=0: matching frames go only to the mirror (CPU) port. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
	if (ret)
		return ret;

	return 0;
}

/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
 * range and forwards to the CPU port without discarding duplicates.
 * This is required to correctly populate the HSR/PRP node_table.
 * Leave the policy disabled, it will be enabled as needed.
 */
static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
				       int fwdport)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare 40 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
	if (ret)
		return ret;

	/* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
	for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
				   eth_hsrsup_addr[i] |
				   (eth_hsrsup_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror HSR/PRP supervision to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
	if (ret)
		return ret;

	/* In HSR mode also keep forwarding toward the partner ring port. */
	if (fwdport >= 0)
		val |= BIT(fwdport);

	/* Allow must be set to prevent duplicate discard */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
	if (ret)
		return ret;

	return 0;
}

/* Per-port bring-up: STP-disable the port, restrict forwarding to the CPU
 * port only, set management vs. normal port mode, and (on user ports)
 * install the BPDU inbound policy.
 */
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
	bool cpu_port = dsa_is_cpu_port(ds, port);
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int ret, i;

	xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);

	/* Disable forwarding to non-CPU ports */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	/* 1 = Disable forwarding to the port */
	ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
	if (ret)
		return ret;

	val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
	ret = regmap_fields_write(priv->ps_management, port, val);
	if (ret)
		return ret;

	if (!cpu_port) {
		ret = xrs700x_port_add_bpdu_ipf(ds, port);
		if (ret)
			return ret;
	}

	return 0;
}

/* Switch bring-up: reset hardware, configure every port, start MIB polling. */
static int xrs700x_setup(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	int ret, i;

	ret = xrs700x_reset(ds);
	if (ret)
		return ret;

	for (i = 0; i < ds->num_ports; i++) {
		ret = xrs700x_port_setup(ds, i);
		if (ret)
			return ret;
	}

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);

	return 0;
}

static void xrs700x_teardown(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;

	cancel_delayed_work_sync(&priv->mib_work);
}

/* Port 0 is RMII 10/100 full duplex; ports 1-3 are RGMII up to 1000FD. */
static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	switch (port) {
	case 0:
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);
		config->mac_capabilities = MAC_10FD | MAC_100FD;
		break;

	case 1:
	case 2:
	case 3:
		phy_interface_set_rgmii(config->supported_interfaces);
		config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
		break;

	default:
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		break;
	}
}

/* Program the port's selected-speed field when the link comes up. */
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode, phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;

	switch (speed) {
	case SPEED_1000:
		val = XRS_PORT_SPEED_1000;
		break;
	case SPEED_100:
		val = XRS_PORT_SPEED_100;
		break;
	case SPEED_10:
		val = XRS_PORT_SPEED_10;
		break;
	default:
		return;
	}

	regmap_fields_write(priv->ps_sel_speed, port, val);

	dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
			    __func__, port, mode, speed);
}

/* Recompute the forward masks for all members of @bridge.
 *
 * The hardware mask is inverted: a set bit DISABLES forwarding to that
 * port. Each member port is allowed to forward to the CPU port and to its
 * fellow bridge members; on leave, @port is restricted back to CPU-only.
 */
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge, bool join)
{
	unsigned int i, cpu_mask = 0, mask = 0;
	struct xrs700x *priv = ds->priv;
	int ret;

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		/* cpu_mask blocks every non-CPU port (CPU-only forwarding). */
		cpu_mask |= BIT(i);

		if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* mask blocks only ports outside this bridge. */
		mask |= BIT(i);
	}

	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* 1 = Disable forwarding to the port */
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
		if (ret)
			return ret;
	}

	if (!join) {
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
				   cpu_mask);
		if (ret)
			return ret;
	}

	return 0;
}

static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	return xrs700x_bridge_common(ds, port, bridge, true);
}

static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge)
{
	xrs700x_bridge_common(ds, port, bridge, false);
}

/* Enable hardware HSR/PRP redundancy once both ring ports have joined:
 * assign LAN A/B identities, open forwarding between the ring ports (HSR
 * only) and the CPU port, install the supervision-frame inbound policies,
 * and advertise HSR offload features on the slave netdevs.
 */
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
			    struct net_device *hsr)
{
	unsigned int val = XRS_HSR_CFG_HSR_PRP;
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int ret, i, hsr_pair[2];
	enum hsr_version ver;
	bool fwd = false;

	ret = hsr_get_version(hsr, &ver);
	if (ret)
		return ret;

	/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
	if (port != 1 && port != 2)
		return -EOPNOTSUPP;

	if (ver == HSR_V1)
		val |= XRS_HSR_CFG_HSR;
	else if (ver == PRP_V1)
		val |= XRS_HSR_CFG_PRP;
	else
		return -EOPNOTSUPP;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	/* We can't enable redundancy on the switch until both
	 * redundant ports have signed up.
	 */
	if (!partner)
		return 0;

	/* Quiesce both ports while reconfiguring them. */
	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
		     val | XRS_HSR_CFG_LANID_A);
	regmap_write(priv->regmap, XRS_HSR_CFG(port),
		     val | XRS_HSR_CFG_LANID_B);

	/* Clear bits for both redundant ports (HSR only) and the CPU port to
	 * enable forwarding.
	 */
	val = GENMASK(ds->num_ports - 1, 0);
	if (ver == HSR_V1) {
		val &= ~BIT(partner->index);
		val &= ~BIT(port);
		fwd = true;
	}
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Enable inbound policy which allows HSR/PRP supervision forwarding
	 * to the CPU port without discarding duplicates. Continue to
	 * forward to redundant ports when in HSR mode while discarding
	 * duplicates.
	 */
	ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
	if (ret)
		return ret;

	ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
	if (ret)
		return ret;

	/* Bit 0 of XRS_ETH_ADDR_CFG enables the inbound policy. */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

/* Undo xrs700x_hsr_join(): clear the HSR configuration, restore CPU-only
 * forwarding, disable the supervision inbound policies and drop the HSR
 * offload features from the slave netdevs.
 */
static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
			     struct net_device *hsr)
{
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int i, hsr_pair[2];
	unsigned int val;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	if (!partner)
		return 0;

	/* Quiesce both ports while reconfiguring them. */
	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
	regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);

	/* Clear bit for the CPU port to enable forwarding. */
	val = GENMASK(ds->num_ports - 1, 0);
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
	 * which allows HSR/PRP supervision forwarding to the CPU port without
	 * discarding duplicates.
	 */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

static const struct dsa_switch_ops xrs700x_ops = {
	.get_tag_protocol	= xrs700x_get_tag_protocol,
	.setup			= xrs700x_setup,
	.teardown		= xrs700x_teardown,
	.port_stp_state_set	= xrs700x_port_stp_state_set,
	.phylink_get_caps	= xrs700x_phylink_get_caps,
	.phylink_mac_link_up	= xrs700x_mac_link_up,
	.get_strings		= xrs700x_get_strings,
	.get_sset_count		= xrs700x_get_sset_count,
	.get_ethtool_stats	= xrs700x_get_ethtool_stats,
	.get_stats64		= xrs700x_get_stats64,
	.port_bridge_join	= xrs700x_bridge_join,
	.port_bridge_leave	= xrs700x_bridge_leave,
	.port_hsr_join		= xrs700x_hsr_join,
	.port_hsr_leave		= xrs700x_hsr_leave,
};

/* Read the device ID register and verify it matches the OF match data;
 * on success also record the variant's port count in the dsa_switch.
 */
static int xrs700x_detect(struct xrs700x *priv)
{
	const struct xrs700x_info *info;
	unsigned int id;
	int ret;

	ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
	if (ret) {
		dev_err(priv->dev, "error %d while reading switch id.\n",
			ret);
		return ret;
	}

	info = of_device_get_match_data(priv->dev);
	if (!info)
		return -EINVAL;

	if (info->id == id) {
		priv->ds->num_ports = info->num_ports;
		dev_info(priv->dev, "%s detected.\n", info->name);
		return 0;
	}

	dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
		info->id, id);

	return -ENODEV;
}

/* Allocate the dsa_switch and driver private data (devres-managed; @devpriv
 * is the bus-specific context stashed in priv->priv). Returns NULL on OOM.
 */
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
{
	struct dsa_switch *ds;
	struct xrs700x *priv;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;

	priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);

	ds->ops = &xrs700x_ops;
	ds->priv = priv;
	priv->dev = base;

	priv->ds = ds;
	priv->priv = devpriv;

	return priv;
}
EXPORT_SYMBOL(xrs700x_switch_alloc);

/* Allocate one port's MIB accumulator array and init its locks. */
static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];

	p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
				   sizeof(*p->mib_data), GFP_KERNEL);
	if (!p->mib_data)
		return -ENOMEM;

	mutex_init(&p->mib_mutex);
	u64_stats_init(&p->syncp);

	return 0;
}

/* Detect the device, set up regmap fields and per-port state, then register
 * the switch with the DSA core.
 */
int xrs700x_switch_register(struct xrs700x *priv)
{
	int ret;
	int i;

	ret = xrs700x_detect(priv);
	if (ret)
		return ret;

	ret = xrs700x_setup_regmap_range(priv);
	if (ret)
		return ret;

	priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
				   sizeof(*priv->ports), GFP_KERNEL);
	if (!priv->ports)
		return -ENOMEM;

	for (i = 0; i < priv->ds->num_ports; i++) {
		ret = xrs700x_alloc_port_mib(priv, i);
		if (ret)
			return ret;
	}

	return dsa_register_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_register);

void xrs700x_switch_remove(struct xrs700x *priv)
{
	dsa_unregister_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_remove);

void xrs700x_switch_shutdown(struct xrs700x *priv)
{
	dsa_switch_shutdown(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_shutdown);

MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");