// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"

static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}

static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
			   int from, int to, bool allow)
{
	if (allow) {
		l2_fwd[from].bc_domain |= BIT(to);
		l2_fwd[from].reach_port |= BIT(to);
		l2_fwd[from].fl_domain |= BIT(to);
	} else {
		l2_fwd[from].bc_domain &= ~BIT(to);
		l2_fwd[from].reach_port &= ~BIT(to);
		l2_fwd[from].fl_domain &= ~BIT(to);
	}
}

/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;
	sja1105_mii_role_t role;
};

static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * retrieved from the PHY object through phylib and
		 * sja1105_adjust_port_config.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 0,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
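		 * The CPU port is an exception; it is enabled statically in
		 * the loop below, since STP is never run on it.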
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}

static int sja1105_init_mii_settings(struct sja1105_private *priv,
				     struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];

	/* Discard previous xMII Mode Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;

	mii = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		switch (ports[i].phy_mode) {
		case PHY_INTERFACE_MODE_MII:
			mii->xmii_mode[i] = XMII_MODE_MII;
			break;
		case PHY_INTERFACE_MODE_RMII:
			mii->xmii_mode[i] = XMII_MODE_RMII;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			mii->xmii_mode[i] = XMII_MODE_RGMII;
			break;
		default:
			dev_err(dev, "Unsupported PHY mode %s!\n",
				phy_modes(ports[i].phy_mode));
		}

		mii->phy_mac[i] = ports[i].role;
	}
	return 0;
}

static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* We only populate the FDB table through dynamic
	 * L2 Address Lookup entries
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}
	return 0;
}

static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = false,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}

static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 0,
	};
	int i;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 0.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 * The pvid of 0 is sufficient to pass traffic while the ports are
	 * standalone and when vlan_filtering is disabled. When filtering
	 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
	 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
	 * vlan' even when vlan_filtering is off, but it has no effect.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN ID 0: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit priority-tagged frames as untagged.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		pvid.vmemb_port |= BIT(i);
		pvid.vlan_bc |= BIT(i);
		pvid.tag_port &= ~BIT(i);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}

static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}

static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
		/* Disallow dynamic reconfiguration of vlan_pmap */
		.max_dynp = 0,
		/* Use a single memory partition for all ingress queues */
		.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
				default_l2fwd_params;

	return 0;
}

static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
		/* Priority queue for link-local frames trapped to CPU */
		.hostprio = 0,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = true,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = true,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port.
		 * Such traffic received on host_port itself would be dropped,
		 * except by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Same as host port */
		.mirr_port = dsa_upstream_port(priv->ds, 0),
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = 0,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;
	int i, k = 0;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_dsa_port(priv->ds, i))
			default_general_params.casc_port = i;
		else if (dsa_is_user_port(priv->ds, i))
			priv->ports[i].mgmt_slot = k++;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}

#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)

static inline void
sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
		      int index)
{
	policing[index].sharindx = index;
	policing[index].smax = 65535; /* Burst size in bytes */
	policing[index].rate = SJA1105_RATE_MBPS(1000);
	policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	policing[index].partition = 0;
}

static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int i, j, k;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* k sweeps through all unicast policers (0-39).
	 * bcast sweeps through policers 40-44.
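	 * Every policer is programmed with the same defaults (1Gbps rate,
	 * maximum burst, single-VLAN MTU) by sja1105_setup_policer() above.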
	 */
	for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;

		for (j = 0; j < SJA1105_NUM_TC; j++, k++)
			sja1105_setup_policer(policing, k);

		/* Set up this port's policer for broadcast traffic */
		sja1105_setup_policer(policing, bcast);
	}
	return 0;
}

static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}

static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
				      const struct sja1105_dt_port *ports)
{
	int i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (ports[i].role == XMII_MAC)
			continue;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_rx_delay[i] = true;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_tx_delay[i] = true;

		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
		    !priv->info->setup_rgmii_delay)
			return -EINVAL;
	}
	return 0;
}

static int sja1105_parse_ports_node(struct sja1105_private *priv,
				    struct sja1105_dt_port *ports,
				    struct device_node *ports_node)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *child;

	for_each_child_of_node(ports_node, child) {
		struct device_node *phy_node;
		int phy_mode;
		u32 index;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &index) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		phy_mode = of_get_phy_mode(child);
		if (phy_mode < 0) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				index);
			return -ENODEV;
		}
		ports[index].phy_mode = phy_mode;

		phy_node = of_parse_phandle(child, "phy-handle", 0);
		if (!phy_node) {
			if (!of_phy_is_fixed_link(child)) {
				dev_err(dev, "phy-handle or fixed-link "
					"properties missing!\n");
				return -ENODEV;
			}
			/* phy-handle is missing, but fixed-link isn't.
			 * So it's a fixed link. Default to PHY role.
			 */
			ports[index].role = XMII_PHY;
		} else {
			/* phy-handle present => put port in MAC role */
			ports[index].role = XMII_MAC;
			of_node_put(phy_node);
		}

		/* The MAC/PHY role can be overridden with explicit bindings */
		if (of_property_read_bool(child, "sja1105,role-mac"))
			ports[index].role = XMII_MAC;
		else if (of_property_read_bool(child, "sja1105,role-phy"))
			ports[index].role = XMII_PHY;
	}

	return 0;
}

static int sja1105_parse_dt(struct sja1105_private *priv,
			    struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *switch_node = dev->of_node;
	struct device_node *ports_node;
	int rc;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	rc = sja1105_parse_ports_node(priv, ports, ports_node);
	of_node_put(ports_node);

	return rc;
}

/* Convert back and forth MAC speed from Mbps to SJA1105 encoding */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO] = 0,
	[SJA1105_SPEED_10MBPS] = 10,
	[SJA1105_SPEED_100MBPS] = 100,
	[SJA1105_SPEED_1000MBPS] = 1000,
};

/* Set link speed and enable/disable traffic I/O in the MAC configuration
 * for a specific port.
 *
 * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
 * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
 *	     settings from the STP state, but not persistently (does not
 *	     overwrite the static MAC info for this port).
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps, bool enabled)
{
	struct sja1105_mac_config_entry dyn_mac;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (speed_mbps) {
	case 0:
		/* No speed update requested */
		speed = SJA1105_SPEED_AUTO;
		break;
	case 10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case 100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case 1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
	 * configuration table, since this will be used for the clocking setup,
	 * and we no longer need to store it in the static config (already told
	 * hardware we want auto during upload phase).
	 */
	mac[port].speed = speed;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	dyn_mac = mac[port];
	dyn_mac.ingress = enabled && mac[port].ingress;
	dyn_mac.egress = enabled && mac[port].egress;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
					  port, &dyn_mac, true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!enabled)
		return 0;

	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}

static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int link_an_mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;

	if (!state->link)
		sja1105_adjust_port_config(priv, port, 0, false);
	else
		sja1105_adjust_port_config(priv, port, state->speed, true);
}

static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
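 * For example, bin 0 covers FDB indices 0-3 and bin 255 covers 1020-1023.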
 */
static inline int sja1105et_fdb_index(int bin, int way)
{
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}

static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}

int sja1105et_fdb_add(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int last_unused = -1;
	int bin, way;

	bin = sja1105et_fdb_hash(priv, addr, vid);

	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, &last_unused);
	if (way >= 0) {
		/* We have an FDB entry. Is our port in the destination
		 * mask? If yes, we need to do nothing. If not, we need
		 * to rewrite the entry by adding this port to it.
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		l2_lookup.destports |= BIT(port);
	} else {
		/* We don't have an FDB entry. We construct a new one and
		 * try to find a place for it within the FDB table.
		 */
		l2_lookup.macaddr = ether_addr_to_u64(addr);
		l2_lookup.destports = BIT(port);
		l2_lookup.vlanid = vid;

		if (last_unused >= 0) {
			way = last_unused;
		} else {
			u8 rand_way;
			int index;

			/* Bin is full, need to evict somebody.
			 * Choose victim at random. If you get these messages
			 * often, you may need to consider changing the
			 * distribution function:
			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
			 */
			get_random_bytes(&rand_way, sizeof(rand_way));
			way = rand_way % SJA1105ET_FDB_BIN_SIZE;
			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
				 bin, addr, way);
			/* Evict entry at the now-known (bin, way) position */
			index = sja1105et_fdb_index(bin, way);
			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
						     index, NULL, false);
		}
	}
	l2_lookup.index = sja1105et_fdb_index(bin, way);

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup,
					    true);
}

int sja1105et_fdb_del(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int index, bin, way;
	bool keep;

	bin = sja1105et_fdb_hash(priv, addr, vid);
	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, NULL);
	if (way < 0)
		return 0;
	index = sja1105et_fdb_index(bin, way);

	/* We have an FDB entry. Is our port in the destination mask? If yes,
	 * we need to remove it. If the resulting port mask becomes empty, we
	 * need to completely evict the FDB entry.
	 * Otherwise we just write it back.
	 */
	l2_lookup.destports &= ~BIT(port);

	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    index, &l2_lookup, keep);
}

int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.mask_iotag = BIT(0);
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.index = i;

skip_finding_an_index:
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup,
					    true);
}

int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.mask_iotag = BIT(0);
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* Decide whether we remove just this port from the FDB entry,
	 * or if we remove it completely.
	 */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup, keep);
}

static int sja1105_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Since we make use of VLANs even when the bridge core doesn't tell us
	 * to, translate these FDB entries into the correct dsa_8021q ones.
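	 * One copy is installed for the port's TX VID and another on the
	 * upstream (CPU) port for the RX VID.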
	 */
	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
		unsigned int upstream = dsa_upstream_port(priv->ds, port);
		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
		u16 rx_vid = dsa_8021q_rx_vid(ds, port);

		rc = priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
		if (rc < 0)
			return rc;
		return priv->info->fdb_add_cmd(ds, upstream, addr, rx_vid);
	}
	return priv->info->fdb_add_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Since we make use of VLANs even when the bridge core doesn't tell us
	 * to, translate these FDB entries into the correct dsa_8021q ones.
	 */
	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
		unsigned int upstream = dsa_upstream_port(priv->ds, port);
		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
		u16 rx_vid = dsa_8021q_rx_vid(ds, port);

		rc = priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
		if (rc < 0)
			return rc;
		return priv->info->fdb_del_cmd(ds, upstream, addr, rx_vid);
	}
	return priv->info->fdb_del_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -ENOENT)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;
		u64_to_ether_addr(l2_lookup.macaddr, macaddr);

		/* We need to hide the dsa_8021q VLAN from the user.
		 * Convert the TX VID into the pvid that is active in
		 * standalone and non-vlan_filtering modes, aka 1.
		 * The RX VID is applied on the CPU port, which is not seen by
		 * the bridge core anyway, so there's nothing to hide.
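		 * With vlan_filtering enabled, the VID stored in the hardware
		 * entry is reported unchanged.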
		 */
		if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
			l2_lookup.vlanid = 1;
		cb(macaddr, l2_lookup.vlanid, false, data);
	}
	return 0;
}

/* This callback needs to be present */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	return 0;
}

static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and vice versa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}

static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the
		 * state of the INGRESS flag". So BPDUs are still allowed to
		 * pass. At the moment no difference between DISABLED and
		 * BLOCKING.
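		 * Address learning is also turned off so the port does not
		 * pollute the FDB while blocked.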
		 */
		mac[port].ingress = false;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = true;
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress = true;
		mac[port].egress = true;
		mac[port].dyn_learn = true;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}

static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}

static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}

static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
		return BR_STATE_BLOCKING;
	if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
		return BR_STATE_LISTENING;
	if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
		return BR_STATE_LEARNING;
	if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
		return BR_STATE_FORWARDING;
	/* This is really an error condition if the MAC was in none of the STP
	 * states above. But treating the port as disabled does nothing, which
	 * is adequate, and it also resets the MAC to a known state later on.
	 */
	return BR_STATE_DISABLED;
}

/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	u8 stp_state[SJA1105_NUM_PORTS];
	int rc, i;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up settings changed by sja1105_adjust_port_config and
	 * sja1105_bridge_stp_state_set and restore their defaults.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
		if (i == dsa_upstream_port(priv->ds, i)) {
			mac[i].ingress = true;
			mac[i].egress = true;
			mac[i].dyn_learn = true;
		} else {
			stp_state[i] = sja1105_stp_state_get(priv, i);
			mac[i].ingress = false;
			mac[i].egress = false;
			mac[i].dyn_learn = false;
		}
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
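	 * RGMII ports get their per-speed clocking reapplied below, via
	 * sja1105_adjust_port_config().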
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		bool enabled = (speed_mbps[i] != 0);

		if (i != dsa_upstream_port(priv->ds, i))
			sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
						enabled);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}

/* The TPID setting belongs to the General Parameters table,
 * which can only be partially reconfigured at runtime (and not the TPID).
 * So a switch reset is required.
 */
static int sja1105_change_tpid(struct sja1105_private *priv,
			       u16 tpid, u16 tpid2)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	general_params->tpid = tpid;
	general_params->tpid2 = tpid2;
	return sja1105_static_config_reload(priv);
}

static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}

static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
	struct sja1105_vlan_lookup_entry *vlan;
	int count, i;

	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

	for (i = 0; i < count; i++)
		if (vlan[i].vlanid == vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}

static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}

static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	int rc, i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
				i, rc);
			return rc;
		}
	}
	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ? "Enabled" : "Disabled");
	return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
	return DSA_TAG_PROTO_SJA1105;
}

/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}

static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	if (enabled)
		/* Enable VLAN filtering. */
		rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
	else
		/* Disable VLAN filtering. */
		rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Switch port identification based on 802.1Q is only possible
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive.
	 */
	return sja1105_setup_8021q_tagging(ds, !enabled);
}

static void sja1105_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
				vid, port, rc);
			return;
		}
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			rc = sja1105_pvid_apply(ds->priv, port, vid);
			if (rc < 0) {
				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
					vid, port, rc);
				return;
			}
		}
	}
}

static int sja1105_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
				vid, port, rc);
			return rc;
		}
	}
	return 0;
}

/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables.
 * Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}

static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port.
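	 * The management route programmed above steers the frame to the
	 * intended egress port.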
	 */
	dsa_enqueue_skb(skb, ds->ports[port].slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
					      struct sk_buff *skb)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];
	int slot = sp->mgmt_slot;

	/* The tragic fact about the switch having 4x2 slots for installing
	 * management routes is that all of them except one are actually
	 * useless.
	 * If 2 slots are simultaneously configured for two BPDUs sent to the
	 * same (multicast) DMAC but on different egress ports, the switch
	 * would confuse them and redirect first frame it receives on the CPU
	 * port towards the port configured on the numerically first slot
	 * (therefore wrong port), then second received frame on second slot
	 * (also wrong port).
	 * So for all practical purposes, there needs to be a lock that
	 * prevents that from happening. The slot used here is utterly useless
	 * (could have simply been 0 just as fine), but we are doing it
	 * nonetheless, in case a smarter idea ever comes up in the future.
	 */
	mutex_lock(&priv->mgmt_lock);

	sja1105_mgmt_xmit(ds, port, slot, skb);

	mutex_unlock(&priv->mgmt_lock);
	return NETDEV_TX_OK;
}

/* The MAXAGE setting belongs to the L2 Lookup Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
				   unsigned int ageing_time)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	unsigned int maxage;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;

	maxage = SJA1105_AGEING_TIME_MS(ageing_time);

	if (l2_lookup_params->maxage == maxage)
		return 0;

	l2_lookup_params->maxage = maxage;

	return sja1105_static_config_reload(priv);
}

static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.set_ageing_time	= sja1105_set_ageing_time,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_config	= sja1105_mac_config,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_deferred_xmit	= sja1105_port_deferred_xmit,
};

static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	u64 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
				  &device_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	if (device_id != priv->info->device_id) {
		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
			priv->info->device_id, device_id);
		return -ENODEV;
	}

	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
					 prod_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	if (part_no != priv->info->part_no) {
		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
			priv->info->part_no, part_no);
		return -ENODEV;
	}

	return 0;
}

static int sja1105_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, i;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	/* Connections between dsa_port and sja1105_port */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		struct sja1105_port *sp = &priv->ports[i];

		ds->ports[i].priv = sp;
		sp->dp = &ds->ports[i];
	}
	mutex_init(&priv->mgmt_lock);

	return dsa_register_switch(priv->ds);
}

static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	sja1105_static_config_free(&priv->static_config);
	return 0;
}

static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");