1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/delay.h> 9 #include <linux/module.h> 10 #include <linux/printk.h> 11 #include <linux/spi/spi.h> 12 #include <linux/errno.h> 13 #include <linux/gpio/consumer.h> 14 #include <linux/phylink.h> 15 #include <linux/of.h> 16 #include <linux/of_net.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_device.h> 19 #include <linux/netdev_features.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_bridge.h> 22 #include <linux/if_ether.h> 23 #include <linux/dsa/8021q.h> 24 #include "sja1105.h" 25 #include "sja1105_sgmii.h" 26 #include "sja1105_tas.h" 27 28 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, 29 unsigned int startup_delay) 30 { 31 gpiod_set_value_cansleep(gpio, 1); 32 /* Wait for minimum reset pulse length */ 33 msleep(pulse_len); 34 gpiod_set_value_cansleep(gpio, 0); 35 /* Wait until chip is ready after reset */ 36 msleep(startup_delay); 37 } 38 39 static void 40 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, 41 int from, int to, bool allow) 42 { 43 if (allow) { 44 l2_fwd[from].bc_domain |= BIT(to); 45 l2_fwd[from].reach_port |= BIT(to); 46 l2_fwd[from].fl_domain |= BIT(to); 47 } else { 48 l2_fwd[from].bc_domain &= ~BIT(to); 49 l2_fwd[from].reach_port &= ~BIT(to); 50 l2_fwd[from].fl_domain &= ~BIT(to); 51 } 52 } 53 54 /* Structure used to temporarily transport device tree 55 * settings into sja1105_setup 56 */ 57 struct sja1105_dt_port { 58 phy_interface_t phy_mode; 59 sja1105_mii_role_t role; 60 }; 61 62 static int sja1105_init_mac_settings(struct sja1105_private *priv) 63 { 64 struct sja1105_mac_config_entry default_mac = { 65 /* Enable all 8 priority queues on egress. 66 * Every queue i holds top[i] - base[i] frames. 67 * Sum of top[i] - base[i] is 511 (max hardware limit). 
68 */ 69 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, 70 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, 71 .enabled = {true, true, true, true, true, true, true, true}, 72 /* Keep standard IFG of 12 bytes on egress. */ 73 .ifg = 0, 74 /* Always put the MAC speed in automatic mode, where it can be 75 * adjusted at runtime by PHYLINK. 76 */ 77 .speed = SJA1105_SPEED_AUTO, 78 /* No static correction for 1-step 1588 events */ 79 .tp_delin = 0, 80 .tp_delout = 0, 81 /* Disable aging for critical TTEthernet traffic */ 82 .maxage = 0xFF, 83 /* Internal VLAN (pvid) to apply to untagged ingress */ 84 .vlanprio = 0, 85 .vlanid = 1, 86 .ing_mirr = false, 87 .egr_mirr = false, 88 /* Don't drop traffic with other EtherType than ETH_P_IP */ 89 .drpnona664 = false, 90 /* Don't drop double-tagged traffic */ 91 .drpdtag = false, 92 /* Don't drop untagged traffic */ 93 .drpuntag = false, 94 /* Don't retag 802.1p (VID 0) traffic with the pvid */ 95 .retag = false, 96 /* Disable learning and I/O on user ports by default - 97 * STP will enable it. 98 */ 99 .dyn_learn = false, 100 .egress = false, 101 .ingress = false, 102 }; 103 struct sja1105_mac_config_entry *mac; 104 struct sja1105_table *table; 105 int i; 106 107 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; 108 109 /* Discard previous MAC Configuration Table */ 110 if (table->entry_count) { 111 kfree(table->entries); 112 table->entry_count = 0; 113 } 114 115 table->entries = kcalloc(SJA1105_NUM_PORTS, 116 table->ops->unpacked_entry_size, GFP_KERNEL); 117 if (!table->entries) 118 return -ENOMEM; 119 120 table->entry_count = SJA1105_NUM_PORTS; 121 122 mac = table->entries; 123 124 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 125 mac[i] = default_mac; 126 if (i == dsa_upstream_port(priv->ds, i)) { 127 /* STP doesn't get called for CPU port, so we need to 128 * set the I/O parameters statically. 
129 */ 130 mac[i].dyn_learn = true; 131 mac[i].ingress = true; 132 mac[i].egress = true; 133 } 134 } 135 136 return 0; 137 } 138 139 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port) 140 { 141 if (priv->info->part_no != SJA1105R_PART_NO && 142 priv->info->part_no != SJA1105S_PART_NO) 143 return false; 144 145 if (port != SJA1105_SGMII_PORT) 146 return false; 147 148 if (dsa_is_unused_port(priv->ds, port)) 149 return false; 150 151 return true; 152 } 153 154 static int sja1105_init_mii_settings(struct sja1105_private *priv, 155 struct sja1105_dt_port *ports) 156 { 157 struct device *dev = &priv->spidev->dev; 158 struct sja1105_xmii_params_entry *mii; 159 struct sja1105_table *table; 160 int i; 161 162 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; 163 164 /* Discard previous xMII Mode Parameters Table */ 165 if (table->entry_count) { 166 kfree(table->entries); 167 table->entry_count = 0; 168 } 169 170 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, 171 table->ops->unpacked_entry_size, GFP_KERNEL); 172 if (!table->entries) 173 return -ENOMEM; 174 175 /* Override table based on PHYLINK DT bindings */ 176 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; 177 178 mii = table->entries; 179 180 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 181 if (dsa_is_unused_port(priv->ds, i)) 182 continue; 183 184 switch (ports[i].phy_mode) { 185 case PHY_INTERFACE_MODE_MII: 186 mii->xmii_mode[i] = XMII_MODE_MII; 187 break; 188 case PHY_INTERFACE_MODE_RMII: 189 mii->xmii_mode[i] = XMII_MODE_RMII; 190 break; 191 case PHY_INTERFACE_MODE_RGMII: 192 case PHY_INTERFACE_MODE_RGMII_ID: 193 case PHY_INTERFACE_MODE_RGMII_RXID: 194 case PHY_INTERFACE_MODE_RGMII_TXID: 195 mii->xmii_mode[i] = XMII_MODE_RGMII; 196 break; 197 case PHY_INTERFACE_MODE_SGMII: 198 if (!sja1105_supports_sgmii(priv, i)) 199 return -EINVAL; 200 mii->xmii_mode[i] = XMII_MODE_SGMII; 201 break; 202 default: 203 dev_err(dev, "Unsupported PHY mode %s!\n", 204 
phy_modes(ports[i].phy_mode)); 205 } 206 207 /* Even though the SerDes port is able to drive SGMII autoneg 208 * like a PHY would, from the perspective of the XMII tables, 209 * the SGMII port should always be put in MAC mode. 210 */ 211 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII) 212 mii->phy_mac[i] = XMII_MAC; 213 else 214 mii->phy_mac[i] = ports[i].role; 215 } 216 return 0; 217 } 218 219 static int sja1105_init_static_fdb(struct sja1105_private *priv) 220 { 221 struct sja1105_table *table; 222 223 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 224 225 /* We only populate the FDB table through dynamic 226 * L2 Address Lookup entries 227 */ 228 if (table->entry_count) { 229 kfree(table->entries); 230 table->entry_count = 0; 231 } 232 return 0; 233 } 234 235 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) 236 { 237 struct sja1105_table *table; 238 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS; 239 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { 240 /* Learned FDB entries are forgotten after 300 seconds */ 241 .maxage = SJA1105_AGEING_TIME_MS(300000), 242 /* All entries within a FDB bin are available for learning */ 243 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, 244 /* And the P/Q/R/S equivalent setting: */ 245 .start_dynspc = 0, 246 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries, 247 max_fdb_entries, max_fdb_entries, }, 248 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ 249 .poly = 0x97, 250 /* This selects between Independent VLAN Learning (IVL) and 251 * Shared VLAN Learning (SVL) 252 */ 253 .shared_learn = true, 254 /* Don't discard management traffic based on ENFPORT - 255 * we don't perform SMAC port enforcement anyway, so 256 * what we are setting here doesn't matter. 257 */ 258 .no_enf_hostprt = false, 259 /* Don't learn SMAC for mac_fltres1 and mac_fltres0. 260 * Maybe correlate with no_linklocal_learn from bridge driver? 
261 */ 262 .no_mgmt_learn = true, 263 /* P/Q/R/S only */ 264 .use_static = true, 265 /* Dynamically learned FDB entries can overwrite other (older) 266 * dynamic FDB entries 267 */ 268 .owr_dyn = true, 269 .drpnolearn = true, 270 }; 271 272 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 273 274 if (table->entry_count) { 275 kfree(table->entries); 276 table->entry_count = 0; 277 } 278 279 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, 280 table->ops->unpacked_entry_size, GFP_KERNEL); 281 if (!table->entries) 282 return -ENOMEM; 283 284 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; 285 286 /* This table only has a single entry */ 287 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = 288 default_l2_lookup_params; 289 290 return 0; 291 } 292 293 static int sja1105_init_static_vlan(struct sja1105_private *priv) 294 { 295 struct sja1105_table *table; 296 struct sja1105_vlan_lookup_entry pvid = { 297 .ving_mirr = 0, 298 .vegr_mirr = 0, 299 .vmemb_port = 0, 300 .vlan_bc = 0, 301 .tag_port = 0, 302 .vlanid = 1, 303 }; 304 int i; 305 306 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 307 308 /* The static VLAN table will only contain the initial pvid of 1. 309 * All other VLANs are to be configured through dynamic entries, 310 * and kept in the static configuration table as backing memory. 311 */ 312 if (table->entry_count) { 313 kfree(table->entries); 314 table->entry_count = 0; 315 } 316 317 table->entries = kcalloc(1, table->ops->unpacked_entry_size, 318 GFP_KERNEL); 319 if (!table->entries) 320 return -ENOMEM; 321 322 table->entry_count = 1; 323 324 /* VLAN 1: all DT-defined ports are members; no restrictions on 325 * forwarding; always transmit priority-tagged frames as untagged. 
326 */ 327 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 328 pvid.vmemb_port |= BIT(i); 329 pvid.vlan_bc |= BIT(i); 330 pvid.tag_port &= ~BIT(i); 331 } 332 333 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; 334 return 0; 335 } 336 337 static int sja1105_init_l2_forwarding(struct sja1105_private *priv) 338 { 339 struct sja1105_l2_forwarding_entry *l2fwd; 340 struct sja1105_table *table; 341 int i, j; 342 343 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; 344 345 if (table->entry_count) { 346 kfree(table->entries); 347 table->entry_count = 0; 348 } 349 350 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, 351 table->ops->unpacked_entry_size, GFP_KERNEL); 352 if (!table->entries) 353 return -ENOMEM; 354 355 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; 356 357 l2fwd = table->entries; 358 359 /* First 5 entries define the forwarding rules */ 360 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 361 unsigned int upstream = dsa_upstream_port(priv->ds, i); 362 363 for (j = 0; j < SJA1105_NUM_TC; j++) 364 l2fwd[i].vlan_pmap[j] = j; 365 366 if (i == upstream) 367 continue; 368 369 sja1105_port_allow_traffic(l2fwd, i, upstream, true); 370 sja1105_port_allow_traffic(l2fwd, upstream, i, true); 371 } 372 /* Next 8 entries define VLAN PCP mapping from ingress to egress. 373 * Create a one-to-one mapping. 
374 */ 375 for (i = 0; i < SJA1105_NUM_TC; i++) 376 for (j = 0; j < SJA1105_NUM_PORTS; j++) 377 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; 378 379 return 0; 380 } 381 382 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) 383 { 384 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { 385 /* Disallow dynamic reconfiguration of vlan_pmap */ 386 .max_dynp = 0, 387 /* Use a single memory partition for all ingress queues */ 388 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, 389 }; 390 struct sja1105_table *table; 391 392 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 393 394 if (table->entry_count) { 395 kfree(table->entries); 396 table->entry_count = 0; 397 } 398 399 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, 400 table->ops->unpacked_entry_size, GFP_KERNEL); 401 if (!table->entries) 402 return -ENOMEM; 403 404 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; 405 406 /* This table only has a single entry */ 407 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = 408 default_l2fwd_params; 409 410 return 0; 411 } 412 413 static int sja1105_init_general_params(struct sja1105_private *priv) 414 { 415 struct sja1105_general_params_entry default_general_params = { 416 /* Allow dynamic changing of the mirror port */ 417 .mirr_ptacu = true, 418 .switchid = priv->ds->index, 419 /* Priority queue for link-local management frames 420 * (both ingress to and egress from CPU - PTP, STP etc) 421 */ 422 .hostprio = 7, 423 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, 424 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, 425 .incl_srcpt1 = false, 426 .send_meta1 = false, 427 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, 428 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, 429 .incl_srcpt0 = false, 430 .send_meta0 = false, 431 /* The destination for traffic matching mac_fltres1 and 432 * mac_fltres0 on all ports except host_port. 
Such traffic 433 * receieved on host_port itself would be dropped, except 434 * by installing a temporary 'management route' 435 */ 436 .host_port = dsa_upstream_port(priv->ds, 0), 437 /* Default to an invalid value */ 438 .mirr_port = SJA1105_NUM_PORTS, 439 /* Link-local traffic received on casc_port will be forwarded 440 * to host_port without embedding the source port and device ID 441 * info in the destination MAC address (presumably because it 442 * is a cascaded port and a downstream SJA switch already did 443 * that). Default to an invalid port (to disable the feature) 444 * and overwrite this if we find any DSA (cascaded) ports. 445 */ 446 .casc_port = SJA1105_NUM_PORTS, 447 /* No TTEthernet */ 448 .vllupformat = 0, 449 .vlmarker = 0, 450 .vlmask = 0, 451 /* Only update correctionField for 1-step PTP (L2 transport) */ 452 .ignore2stf = 0, 453 /* Forcefully disable VLAN filtering by telling 454 * the switch that VLAN has a different EtherType. 455 */ 456 .tpid = ETH_P_SJA1105, 457 .tpid2 = ETH_P_SJA1105, 458 }; 459 struct sja1105_table *table; 460 461 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 462 463 if (table->entry_count) { 464 kfree(table->entries); 465 table->entry_count = 0; 466 } 467 468 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, 469 table->ops->unpacked_entry_size, GFP_KERNEL); 470 if (!table->entries) 471 return -ENOMEM; 472 473 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; 474 475 /* This table only has a single entry */ 476 ((struct sja1105_general_params_entry *)table->entries)[0] = 477 default_general_params; 478 479 return 0; 480 } 481 482 static int sja1105_init_avb_params(struct sja1105_private *priv) 483 { 484 struct sja1105_avb_params_entry *avb; 485 struct sja1105_table *table; 486 487 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; 488 489 /* Discard previous AVB Parameters Table */ 490 if (table->entry_count) { 491 kfree(table->entries); 492 table->entry_count = 0; 493 } 494 495 
table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, 496 table->ops->unpacked_entry_size, GFP_KERNEL); 497 if (!table->entries) 498 return -ENOMEM; 499 500 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; 501 502 avb = table->entries; 503 504 /* Configure the MAC addresses for meta frames */ 505 avb->destmeta = SJA1105_META_DMAC; 506 avb->srcmeta = SJA1105_META_SMAC; 507 /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by 508 * default. This is because there might be boards with a hardware 509 * layout where enabling the pin as output might cause an electrical 510 * clash. On E/T the pin is always an output, which the board designers 511 * probably already knew, so even if there are going to be electrical 512 * issues, there's nothing we can do. 513 */ 514 avb->cas_master = false; 515 516 return 0; 517 } 518 519 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 520 521 static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing, 522 int index) 523 { 524 policing[index].sharindx = index; 525 policing[index].smax = 65535; /* Burst size in bytes */ 526 policing[index].rate = SJA1105_RATE_MBPS(1000); 527 policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 528 policing[index].partition = 0; 529 } 530 531 static int sja1105_init_l2_policing(struct sja1105_private *priv) 532 { 533 struct sja1105_l2_policing_entry *policing; 534 struct sja1105_table *table; 535 int i, j, k; 536 537 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 538 539 /* Discard previous L2 Policing Table */ 540 if (table->entry_count) { 541 kfree(table->entries); 542 table->entry_count = 0; 543 } 544 545 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, 546 table->ops->unpacked_entry_size, GFP_KERNEL); 547 if (!table->entries) 548 return -ENOMEM; 549 550 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; 551 552 policing = table->entries; 553 554 /* k sweeps through all unicast policers (0-39). 
555 * bcast sweeps through policers 40-44. 556 */ 557 for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) { 558 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i; 559 560 for (j = 0; j < SJA1105_NUM_TC; j++, k++) 561 sja1105_setup_policer(policing, k); 562 563 /* Set up this port's policer for broadcast traffic */ 564 sja1105_setup_policer(policing, bcast); 565 } 566 return 0; 567 } 568 569 static int sja1105_static_config_load(struct sja1105_private *priv, 570 struct sja1105_dt_port *ports) 571 { 572 int rc; 573 574 sja1105_static_config_free(&priv->static_config); 575 rc = sja1105_static_config_init(&priv->static_config, 576 priv->info->static_ops, 577 priv->info->device_id); 578 if (rc) 579 return rc; 580 581 /* Build static configuration */ 582 rc = sja1105_init_mac_settings(priv); 583 if (rc < 0) 584 return rc; 585 rc = sja1105_init_mii_settings(priv, ports); 586 if (rc < 0) 587 return rc; 588 rc = sja1105_init_static_fdb(priv); 589 if (rc < 0) 590 return rc; 591 rc = sja1105_init_static_vlan(priv); 592 if (rc < 0) 593 return rc; 594 rc = sja1105_init_l2_lookup_params(priv); 595 if (rc < 0) 596 return rc; 597 rc = sja1105_init_l2_forwarding(priv); 598 if (rc < 0) 599 return rc; 600 rc = sja1105_init_l2_forwarding_params(priv); 601 if (rc < 0) 602 return rc; 603 rc = sja1105_init_l2_policing(priv); 604 if (rc < 0) 605 return rc; 606 rc = sja1105_init_general_params(priv); 607 if (rc < 0) 608 return rc; 609 rc = sja1105_init_avb_params(priv); 610 if (rc < 0) 611 return rc; 612 613 /* Send initial configuration to hardware via SPI */ 614 return sja1105_static_config_upload(priv); 615 } 616 617 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 618 const struct sja1105_dt_port *ports) 619 { 620 int i; 621 622 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 623 if (ports[i].role == XMII_MAC) 624 continue; 625 626 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || 627 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 628 priv->rgmii_rx_delay[i] = 
true; 629 630 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 631 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 632 priv->rgmii_tx_delay[i] = true; 633 634 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 635 !priv->info->setup_rgmii_delay) 636 return -EINVAL; 637 } 638 return 0; 639 } 640 641 static int sja1105_parse_ports_node(struct sja1105_private *priv, 642 struct sja1105_dt_port *ports, 643 struct device_node *ports_node) 644 { 645 struct device *dev = &priv->spidev->dev; 646 struct device_node *child; 647 648 for_each_available_child_of_node(ports_node, child) { 649 struct device_node *phy_node; 650 phy_interface_t phy_mode; 651 u32 index; 652 int err; 653 654 /* Get switch port number from DT */ 655 if (of_property_read_u32(child, "reg", &index) < 0) { 656 dev_err(dev, "Port number not defined in device tree " 657 "(property \"reg\")\n"); 658 of_node_put(child); 659 return -ENODEV; 660 } 661 662 /* Get PHY mode from DT */ 663 err = of_get_phy_mode(child, &phy_mode); 664 if (err) { 665 dev_err(dev, "Failed to read phy-mode or " 666 "phy-interface-type property for port %d\n", 667 index); 668 of_node_put(child); 669 return -ENODEV; 670 } 671 ports[index].phy_mode = phy_mode; 672 673 phy_node = of_parse_phandle(child, "phy-handle", 0); 674 if (!phy_node) { 675 if (!of_phy_is_fixed_link(child)) { 676 dev_err(dev, "phy-handle or fixed-link " 677 "properties missing!\n"); 678 of_node_put(child); 679 return -ENODEV; 680 } 681 /* phy-handle is missing, but fixed-link isn't. 682 * So it's a fixed link. Default to PHY role. 
683 */ 684 ports[index].role = XMII_PHY; 685 } else { 686 /* phy-handle present => put port in MAC role */ 687 ports[index].role = XMII_MAC; 688 of_node_put(phy_node); 689 } 690 691 /* The MAC/PHY role can be overridden with explicit bindings */ 692 if (of_property_read_bool(child, "sja1105,role-mac")) 693 ports[index].role = XMII_MAC; 694 else if (of_property_read_bool(child, "sja1105,role-phy")) 695 ports[index].role = XMII_PHY; 696 } 697 698 return 0; 699 } 700 701 static int sja1105_parse_dt(struct sja1105_private *priv, 702 struct sja1105_dt_port *ports) 703 { 704 struct device *dev = &priv->spidev->dev; 705 struct device_node *switch_node = dev->of_node; 706 struct device_node *ports_node; 707 int rc; 708 709 ports_node = of_get_child_by_name(switch_node, "ports"); 710 if (!ports_node) { 711 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 712 return -ENODEV; 713 } 714 715 rc = sja1105_parse_ports_node(priv, ports, ports_node); 716 of_node_put(ports_node); 717 718 return rc; 719 } 720 721 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg) 722 { 723 const struct sja1105_regs *regs = priv->info->regs; 724 u32 val; 725 int rc; 726 727 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val, 728 NULL); 729 if (rc < 0) 730 return rc; 731 732 return val; 733 } 734 735 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg, 736 u16 pcs_val) 737 { 738 const struct sja1105_regs *regs = priv->info->regs; 739 u32 val = pcs_val; 740 int rc; 741 742 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val, 743 NULL); 744 if (rc < 0) 745 return rc; 746 747 return val; 748 } 749 750 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, 751 bool an_enabled, bool an_master) 752 { 753 u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII; 754 755 /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to 756 * stop the clock during LPI mode, make the MAC reconfigure 757 * autonomously after PCS 
autoneg is done, flush the internal FIFOs. 758 */ 759 sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 | 760 SJA1105_DC1_CLOCK_STOP_EN | 761 SJA1105_DC1_MAC_AUTO_SW | 762 SJA1105_DC1_INIT); 763 /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */ 764 sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE); 765 /* AUTONEG_CONTROL: Use SGMII autoneg */ 766 if (an_master) 767 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK; 768 sja1105_sgmii_write(priv, SJA1105_AC, ac); 769 /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise, 770 * sja1105_sgmii_pcs_force_speed must be called later for the link 771 * to become operational. 772 */ 773 if (an_enabled) 774 sja1105_sgmii_write(priv, MII_BMCR, 775 BMCR_ANENABLE | BMCR_ANRESTART); 776 } 777 778 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv, 779 int speed) 780 { 781 int pcs_speed; 782 783 switch (speed) { 784 case SPEED_1000: 785 pcs_speed = BMCR_SPEED1000; 786 break; 787 case SPEED_100: 788 pcs_speed = BMCR_SPEED100; 789 break; 790 case SPEED_10: 791 pcs_speed = BMCR_SPEED10; 792 break; 793 default: 794 dev_err(priv->ds->dev, "Invalid speed %d\n", speed); 795 return; 796 } 797 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX); 798 } 799 800 /* Convert link speed from SJA1105 to ethtool encoding */ 801 static int sja1105_speed[] = { 802 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 803 [SJA1105_SPEED_10MBPS] = SPEED_10, 804 [SJA1105_SPEED_100MBPS] = SPEED_100, 805 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 806 }; 807 808 /* Set link speed in the MAC configuration for a specific port. 
*/ 809 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, 810 int speed_mbps) 811 { 812 struct sja1105_xmii_params_entry *mii; 813 struct sja1105_mac_config_entry *mac; 814 struct device *dev = priv->ds->dev; 815 sja1105_phy_interface_t phy_mode; 816 sja1105_speed_t speed; 817 int rc; 818 819 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration 820 * tables. On E/T, MAC reconfig tables are not readable, only writable. 821 * We have to *know* what the MAC looks like. For the sake of keeping 822 * the code common, we'll use the static configuration tables as a 823 * reasonable approximation for both E/T and P/Q/R/S. 824 */ 825 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 826 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 827 828 switch (speed_mbps) { 829 case SPEED_UNKNOWN: 830 /* PHYLINK called sja1105_mac_config() to inform us about 831 * the state->interface, but AN has not completed and the 832 * speed is not yet valid. UM10944.pdf says that setting 833 * SJA1105_SPEED_AUTO at runtime disables the port, so that is 834 * ok for power consumption in case AN will never complete - 835 * otherwise PHYLINK should come back with a new update. 836 */ 837 speed = SJA1105_SPEED_AUTO; 838 break; 839 case SPEED_10: 840 speed = SJA1105_SPEED_10MBPS; 841 break; 842 case SPEED_100: 843 speed = SJA1105_SPEED_100MBPS; 844 break; 845 case SPEED_1000: 846 speed = SJA1105_SPEED_1000MBPS; 847 break; 848 default: 849 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); 850 return -EINVAL; 851 } 852 853 /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration 854 * table, since this will be used for the clocking setup, and we no 855 * longer need to store it in the static config (already told hardware 856 * we want auto during upload phase). 857 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and 858 * we need to configure the PCS only (if even that). 
859 */ 860 if (sja1105_supports_sgmii(priv, port)) 861 mac[port].speed = SJA1105_SPEED_1000MBPS; 862 else 863 mac[port].speed = speed; 864 865 /* Write to the dynamic reconfiguration tables */ 866 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 867 &mac[port], true); 868 if (rc < 0) { 869 dev_err(dev, "Failed to write MAC config: %d\n", rc); 870 return rc; 871 } 872 873 /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at 874 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and 875 * RMII no change of the clock setup is required. Actually, changing 876 * the clock setup does interrupt the clock signal for a certain time 877 * which causes trouble for all PHYs relying on this signal. 878 */ 879 phy_mode = mii->xmii_mode[port]; 880 if (phy_mode != XMII_MODE_RGMII) 881 return 0; 882 883 return sja1105_clocking_setup_port(priv, port); 884 } 885 886 /* The SJA1105 MAC programming model is through the static config (the xMII 887 * Mode table cannot be dynamically reconfigured), and we have to program 888 * that early (earlier than PHYLINK calls us, anyway). 889 * So just error out in case the connected PHY attempts to change the initial 890 * system interface MII protocol from what is defined in the DT, at least for 891 * now. 
 */
/* Return true if @interface disagrees with the xMII mode this port was
 * statically programmed with (in which case PHYLINK's request is rejected).
 */
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
				      phy_interface_t interface)
{
	struct sja1105_xmii_params_entry *mii;
	sja1105_phy_interface_t phy_mode;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	phy_mode = mii->xmii_mode[port];

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
		return (phy_mode != XMII_MODE_MII);
	case PHY_INTERFACE_MODE_RMII:
		return (phy_mode != XMII_MODE_RMII);
	/* All four RGMII delay variants map onto the single RGMII xMII mode */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return (phy_mode != XMII_MODE_RGMII);
	case PHY_INTERFACE_MODE_SGMII:
		return (phy_mode != XMII_MODE_SGMII);
	default:
		/* Anything not representable in the xMII table is a mismatch */
		return true;
	}
}

/* PHYLINK .mac_config callback: validate the requested interface/AN mode and
 * (re)configure the SGMII PCS where applicable. Cannot return an error, so
 * unsupported requests are only logged and ignored.
 */
static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	bool is_sgmii = sja1105_supports_sgmii(priv, port);

	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
			phy_modes(state->interface));
		return;
	}

	/* In-band autoneg only exists on the SGMII-capable port */
	if (phylink_autoneg_inband(mode) && !is_sgmii) {
		dev_err(ds->dev, "In-band AN not supported!\n");
		return;
	}

	if (is_sgmii)
		sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
					 false);
}

/* PHYLINK .mac_link_down callback: stop the port from transmitting */
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}

/* PHYLINK .mac_link_up callback: program the resolved speed into the MAC
 * (and force the PCS speed if SGMII without in-band AN), then re-enable TX.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_adjust_port_config(priv, port, speed);

	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	sja1105_inhibit_tx(priv, BIT(port), false);
}

/* PHYLINK .phylink_validate callback: restrict @supported and the
 * advertisement to the link modes this port's MAC can actually do.
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 *     expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	/* Gigabit is only reachable over RGMII or SGMII */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* PHYLINK .mac_pcs_get_state callback: decode the SGMII PCS autoneg status
 * (speed/duplex/link/AN-complete) into @state. Returns 0 or a negative
 * error code from the SPI read.
 */
static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
				     struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	int ais;

	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
	ais = sja1105_sgmii_read(priv, SJA1105_AIS);
	if (ais < 0)
		return ais;

	switch (SJA1105_AIS_SPEED(ais)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	default:
		/* NOTE(review): the invalid-speed case is only logged;
		 * state->speed keeps its previous value here.
		 */
		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
			SJA1105_AIS_SPEED(ais));
	}
	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
	state->an_complete = SJA1105_AIS_COMPLETE(ais);
	state->link = SJA1105_AIS_LINK_STATUS(ais);

	return 0;
}

/* Search the static L2 Lookup table for an entry matching @requested's MAC
 * and VLAN whose destination mask includes @port. Returns the entry index,
 * or -1 if not found.
 */
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
			      const struct sja1105_l2_lookup_entry *requested)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	for (i = 0; i < table->entry_count; i++)
		if (l2_lookup[i].macaddr == requested->macaddr &&
		    l2_lookup[i].vlanid == requested->vlanid &&
		    l2_lookup[i].destports & BIT(port))
			return i;

	return -1;
}

/* We want FDB entries added statically through the
bridge command to persist 1060 * across switch resets, which are a common thing during normal SJA1105 1061 * operation. So we have to back them up in the static configuration tables 1062 * and hence apply them on next static config upload... yay! 1063 */ 1064 static int 1065 sja1105_static_fdb_change(struct sja1105_private *priv, int port, 1066 const struct sja1105_l2_lookup_entry *requested, 1067 bool keep) 1068 { 1069 struct sja1105_l2_lookup_entry *l2_lookup; 1070 struct sja1105_table *table; 1071 int rc, match; 1072 1073 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 1074 1075 match = sja1105_find_static_fdb_entry(priv, port, requested); 1076 if (match < 0) { 1077 /* Can't delete a missing entry. */ 1078 if (!keep) 1079 return 0; 1080 1081 /* No match => new entry */ 1082 rc = sja1105_table_resize(table, table->entry_count + 1); 1083 if (rc) 1084 return rc; 1085 1086 match = table->entry_count - 1; 1087 } 1088 1089 /* Assign pointer after the resize (it may be new memory) */ 1090 l2_lookup = table->entries; 1091 1092 /* We have a match. 1093 * If the job was to add this FDB entry, it's already done (mostly 1094 * anyway, since the port forwarding mask may have changed, case in 1095 * which we update it). 1096 * Otherwise we have to delete it. 1097 */ 1098 if (keep) { 1099 l2_lookup[match] = *requested; 1100 return 0; 1101 } 1102 1103 /* To remove, the strategy is to overwrite the element with 1104 * the last one, and then reduce the array size by 1 1105 */ 1106 l2_lookup[match] = l2_lookup[table->entry_count - 1]; 1107 return sja1105_table_resize(table, table->entry_count - 1); 1108 } 1109 1110 /* First-generation switches have a 4-way set associative TCAM that 1111 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of 1112 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin). 
1113 * For the placement of a newly learnt FDB entry, the switch selects the bin 1114 * based on a hash function, and the way within that bin incrementally. 1115 */ 1116 static int sja1105et_fdb_index(int bin, int way) 1117 { 1118 return bin * SJA1105ET_FDB_BIN_SIZE + way; 1119 } 1120 1121 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 1122 const u8 *addr, u16 vid, 1123 struct sja1105_l2_lookup_entry *match, 1124 int *last_unused) 1125 { 1126 int way; 1127 1128 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 1129 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1130 int index = sja1105et_fdb_index(bin, way); 1131 1132 /* Skip unused entries, optionally marking them 1133 * into the return value 1134 */ 1135 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1136 index, &l2_lookup)) { 1137 if (last_unused) 1138 *last_unused = way; 1139 continue; 1140 } 1141 1142 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 1143 l2_lookup.vlanid == vid) { 1144 if (match) 1145 *match = l2_lookup; 1146 return way; 1147 } 1148 } 1149 /* Return an invalid entry index if not found */ 1150 return -1; 1151 } 1152 1153 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 1154 const unsigned char *addr, u16 vid) 1155 { 1156 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1157 struct sja1105_private *priv = ds->priv; 1158 struct device *dev = ds->dev; 1159 int last_unused = -1; 1160 int bin, way, rc; 1161 1162 bin = sja1105et_fdb_hash(priv, addr, vid); 1163 1164 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1165 &l2_lookup, &last_unused); 1166 if (way >= 0) { 1167 /* We have an FDB entry. Is our port in the destination 1168 * mask? If yes, we need to do nothing. If not, we need 1169 * to rewrite the entry by adding this port to it. 
1170 */ 1171 if (l2_lookup.destports & BIT(port)) 1172 return 0; 1173 l2_lookup.destports |= BIT(port); 1174 } else { 1175 int index = sja1105et_fdb_index(bin, way); 1176 1177 /* We don't have an FDB entry. We construct a new one and 1178 * try to find a place for it within the FDB table. 1179 */ 1180 l2_lookup.macaddr = ether_addr_to_u64(addr); 1181 l2_lookup.destports = BIT(port); 1182 l2_lookup.vlanid = vid; 1183 1184 if (last_unused >= 0) { 1185 way = last_unused; 1186 } else { 1187 /* Bin is full, need to evict somebody. 1188 * Choose victim at random. If you get these messages 1189 * often, you may need to consider changing the 1190 * distribution function: 1191 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1192 */ 1193 get_random_bytes(&way, sizeof(u8)); 1194 way %= SJA1105ET_FDB_BIN_SIZE; 1195 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1196 bin, addr, way); 1197 /* Evict entry */ 1198 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1199 index, NULL, false); 1200 } 1201 } 1202 l2_lookup.index = sja1105et_fdb_index(bin, way); 1203 1204 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1205 l2_lookup.index, &l2_lookup, 1206 true); 1207 if (rc < 0) 1208 return rc; 1209 1210 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1211 } 1212 1213 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1214 const unsigned char *addr, u16 vid) 1215 { 1216 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1217 struct sja1105_private *priv = ds->priv; 1218 int index, bin, way, rc; 1219 bool keep; 1220 1221 bin = sja1105et_fdb_hash(priv, addr, vid); 1222 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1223 &l2_lookup, NULL); 1224 if (way < 0) 1225 return 0; 1226 index = sja1105et_fdb_index(bin, way); 1227 1228 /* We have an FDB entry. Is our port in the destination mask? If yes, 1229 * we need to remove it. 
If the resulting port mask becomes empty, we 1230 * need to completely evict the FDB entry. 1231 * Otherwise we just write it back. 1232 */ 1233 l2_lookup.destports &= ~BIT(port); 1234 1235 if (l2_lookup.destports) 1236 keep = true; 1237 else 1238 keep = false; 1239 1240 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1241 index, &l2_lookup, keep); 1242 if (rc < 0) 1243 return rc; 1244 1245 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1246 } 1247 1248 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, 1249 const unsigned char *addr, u16 vid) 1250 { 1251 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1252 struct sja1105_private *priv = ds->priv; 1253 int rc, i; 1254 1255 /* Search for an existing entry in the FDB table */ 1256 l2_lookup.macaddr = ether_addr_to_u64(addr); 1257 l2_lookup.vlanid = vid; 1258 l2_lookup.iotag = SJA1105_S_TAG; 1259 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1260 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { 1261 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1262 l2_lookup.mask_iotag = BIT(0); 1263 } else { 1264 l2_lookup.mask_vlanid = 0; 1265 l2_lookup.mask_iotag = 0; 1266 } 1267 l2_lookup.destports = BIT(port); 1268 1269 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1270 SJA1105_SEARCH, &l2_lookup); 1271 if (rc == 0) { 1272 /* Found and this port is already in the entry's 1273 * port mask => job done 1274 */ 1275 if (l2_lookup.destports & BIT(port)) 1276 return 0; 1277 /* l2_lookup.index is populated by the switch in case it 1278 * found something. 1279 */ 1280 l2_lookup.destports |= BIT(port); 1281 goto skip_finding_an_index; 1282 } 1283 1284 /* Not found, so try to find an unused spot in the FDB. 1285 * This is slightly inefficient because the strategy is knock-knock at 1286 * every possible position from 0 to 1023. 
1287 */ 1288 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1289 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1290 i, NULL); 1291 if (rc < 0) 1292 break; 1293 } 1294 if (i == SJA1105_MAX_L2_LOOKUP_COUNT) { 1295 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); 1296 return -EINVAL; 1297 } 1298 l2_lookup.lockeds = true; 1299 l2_lookup.index = i; 1300 1301 skip_finding_an_index: 1302 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1303 l2_lookup.index, &l2_lookup, 1304 true); 1305 if (rc < 0) 1306 return rc; 1307 1308 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1309 } 1310 1311 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, 1312 const unsigned char *addr, u16 vid) 1313 { 1314 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1315 struct sja1105_private *priv = ds->priv; 1316 bool keep; 1317 int rc; 1318 1319 l2_lookup.macaddr = ether_addr_to_u64(addr); 1320 l2_lookup.vlanid = vid; 1321 l2_lookup.iotag = SJA1105_S_TAG; 1322 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1323 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { 1324 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1325 l2_lookup.mask_iotag = BIT(0); 1326 } else { 1327 l2_lookup.mask_vlanid = 0; 1328 l2_lookup.mask_iotag = 0; 1329 } 1330 l2_lookup.destports = BIT(port); 1331 1332 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1333 SJA1105_SEARCH, &l2_lookup); 1334 if (rc < 0) 1335 return 0; 1336 1337 l2_lookup.destports &= ~BIT(port); 1338 1339 /* Decide whether we remove just this port from the FDB entry, 1340 * or if we remove it completely. 
1341 */ 1342 if (l2_lookup.destports) 1343 keep = true; 1344 else 1345 keep = false; 1346 1347 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1348 l2_lookup.index, &l2_lookup, keep); 1349 if (rc < 0) 1350 return rc; 1351 1352 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1353 } 1354 1355 static int sja1105_fdb_add(struct dsa_switch *ds, int port, 1356 const unsigned char *addr, u16 vid) 1357 { 1358 struct sja1105_private *priv = ds->priv; 1359 1360 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, 1361 * so the switch still does some VLAN processing internally. 1362 * But Shared VLAN Learning (SVL) is also active, and it will take 1363 * care of autonomous forwarding between the unique pvid's of each 1364 * port. Here we just make sure that users can't add duplicate FDB 1365 * entries when in this mode - the actual VID doesn't matter except 1366 * for what gets printed in 'bridge fdb show'. In the case of zero, 1367 * no VID gets printed at all. 
1368 */ 1369 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1370 vid = 0; 1371 1372 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1373 } 1374 1375 static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1376 const unsigned char *addr, u16 vid) 1377 { 1378 struct sja1105_private *priv = ds->priv; 1379 1380 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1381 vid = 0; 1382 1383 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1384 } 1385 1386 static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1387 dsa_fdb_dump_cb_t *cb, void *data) 1388 { 1389 struct sja1105_private *priv = ds->priv; 1390 struct device *dev = ds->dev; 1391 int i; 1392 1393 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1394 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1395 u8 macaddr[ETH_ALEN]; 1396 int rc; 1397 1398 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1399 i, &l2_lookup); 1400 /* No fdb entry at i, not an issue */ 1401 if (rc == -ENOENT) 1402 continue; 1403 if (rc) { 1404 dev_err(dev, "Failed to dump FDB: %d\n", rc); 1405 return rc; 1406 } 1407 1408 /* FDB dump callback is per port. This means we have to 1409 * disregard a valid entry if it's not for this port, even if 1410 * only to revisit it later. This is inefficient because the 1411 * 1024-sized FDB table needs to be traversed 4 times through 1412 * SPI during a 'bridge fdb show' command. 1413 */ 1414 if (!(l2_lookup.destports & BIT(port))) 1415 continue; 1416 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1417 1418 /* We need to hide the dsa_8021q VLANs from the user. 
*/ 1419 if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1420 l2_lookup.vlanid = 0; 1421 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1422 } 1423 return 0; 1424 } 1425 1426 /* This callback needs to be present */ 1427 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port, 1428 const struct switchdev_obj_port_mdb *mdb) 1429 { 1430 return 0; 1431 } 1432 1433 static void sja1105_mdb_add(struct dsa_switch *ds, int port, 1434 const struct switchdev_obj_port_mdb *mdb) 1435 { 1436 sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); 1437 } 1438 1439 static int sja1105_mdb_del(struct dsa_switch *ds, int port, 1440 const struct switchdev_obj_port_mdb *mdb) 1441 { 1442 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); 1443 } 1444 1445 static int sja1105_bridge_member(struct dsa_switch *ds, int port, 1446 struct net_device *br, bool member) 1447 { 1448 struct sja1105_l2_forwarding_entry *l2_fwd; 1449 struct sja1105_private *priv = ds->priv; 1450 int i, rc; 1451 1452 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1453 1454 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1455 /* Add this port to the forwarding matrix of the 1456 * other ports in the same bridge, and viceversa. 1457 */ 1458 if (!dsa_is_user_port(ds, i)) 1459 continue; 1460 /* For the ports already under the bridge, only one thing needs 1461 * to be done, and that is to add this port to their 1462 * reachability domain. So we can perform the SPI write for 1463 * them immediately. However, for this port itself (the one 1464 * that is new to the bridge), we need to add all other ports 1465 * to its reachability domain. So we do that incrementally in 1466 * this loop, and perform the SPI write only at the end, once 1467 * the domain contains all other bridge ports. 
1468 */ 1469 if (i == port) 1470 continue; 1471 if (dsa_to_port(ds, i)->bridge_dev != br) 1472 continue; 1473 sja1105_port_allow_traffic(l2_fwd, i, port, member); 1474 sja1105_port_allow_traffic(l2_fwd, port, i, member); 1475 1476 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1477 i, &l2_fwd[i], true); 1478 if (rc < 0) 1479 return rc; 1480 } 1481 1482 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1483 port, &l2_fwd[port], true); 1484 } 1485 1486 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, 1487 u8 state) 1488 { 1489 struct sja1105_private *priv = ds->priv; 1490 struct sja1105_mac_config_entry *mac; 1491 1492 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1493 1494 switch (state) { 1495 case BR_STATE_DISABLED: 1496 case BR_STATE_BLOCKING: 1497 /* From UM10944 description of DRPDTAG (why put this there?): 1498 * "Management traffic flows to the port regardless of the state 1499 * of the INGRESS flag". So BPDUs are still be allowed to pass. 1500 * At the moment no difference between DISABLED and BLOCKING. 
1501 */ 1502 mac[port].ingress = false; 1503 mac[port].egress = false; 1504 mac[port].dyn_learn = false; 1505 break; 1506 case BR_STATE_LISTENING: 1507 mac[port].ingress = true; 1508 mac[port].egress = false; 1509 mac[port].dyn_learn = false; 1510 break; 1511 case BR_STATE_LEARNING: 1512 mac[port].ingress = true; 1513 mac[port].egress = false; 1514 mac[port].dyn_learn = true; 1515 break; 1516 case BR_STATE_FORWARDING: 1517 mac[port].ingress = true; 1518 mac[port].egress = true; 1519 mac[port].dyn_learn = true; 1520 break; 1521 default: 1522 dev_err(ds->dev, "invalid STP state: %d\n", state); 1523 return; 1524 } 1525 1526 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1527 &mac[port], true); 1528 } 1529 1530 static int sja1105_bridge_join(struct dsa_switch *ds, int port, 1531 struct net_device *br) 1532 { 1533 return sja1105_bridge_member(ds, port, br, true); 1534 } 1535 1536 static void sja1105_bridge_leave(struct dsa_switch *ds, int port, 1537 struct net_device *br) 1538 { 1539 sja1105_bridge_member(ds, port, br, false); 1540 } 1541 1542 static const char * const sja1105_reset_reasons[] = { 1543 [SJA1105_VLAN_FILTERING] = "VLAN filtering", 1544 [SJA1105_RX_HWTSTAMPING] = "RX timestamping", 1545 [SJA1105_AGEING_TIME] = "Ageing time", 1546 [SJA1105_SCHEDULING] = "Time-aware scheduling", 1547 }; 1548 1549 /* For situations where we need to change a setting at runtime that is only 1550 * available through the static configuration, resetting the switch in order 1551 * to upload the new static config is unavoidable. Back up the settings we 1552 * modify at runtime (currently only MAC) and restore them after uploading, 1553 * such that this operation is relatively seamless. 
1554 */ 1555 int sja1105_static_config_reload(struct sja1105_private *priv, 1556 enum sja1105_reset_reason reason) 1557 { 1558 struct ptp_system_timestamp ptp_sts_before; 1559 struct ptp_system_timestamp ptp_sts_after; 1560 struct sja1105_mac_config_entry *mac; 1561 int speed_mbps[SJA1105_NUM_PORTS]; 1562 struct dsa_switch *ds = priv->ds; 1563 s64 t1, t2, t3, t4; 1564 s64 t12, t34; 1565 u16 bmcr = 0; 1566 int rc, i; 1567 s64 now; 1568 1569 mutex_lock(&priv->mgmt_lock); 1570 1571 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1572 1573 /* Back up the dynamic link speed changed by sja1105_adjust_port_config 1574 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the 1575 * switch wants to see in the static config in order to allow us to 1576 * change it through the dynamic interface later. 1577 */ 1578 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1579 speed_mbps[i] = sja1105_speed[mac[i].speed]; 1580 mac[i].speed = SJA1105_SPEED_AUTO; 1581 } 1582 1583 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) 1584 bmcr = sja1105_sgmii_read(priv, MII_BMCR); 1585 1586 /* No PTP operations can run right now */ 1587 mutex_lock(&priv->ptp_data.lock); 1588 1589 rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before); 1590 if (rc < 0) 1591 goto out_unlock_ptp; 1592 1593 /* Reset switch and send updated static configuration */ 1594 rc = sja1105_static_config_upload(priv); 1595 if (rc < 0) 1596 goto out_unlock_ptp; 1597 1598 rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after); 1599 if (rc < 0) 1600 goto out_unlock_ptp; 1601 1602 t1 = timespec64_to_ns(&ptp_sts_before.pre_ts); 1603 t2 = timespec64_to_ns(&ptp_sts_before.post_ts); 1604 t3 = timespec64_to_ns(&ptp_sts_after.pre_ts); 1605 t4 = timespec64_to_ns(&ptp_sts_after.post_ts); 1606 /* Mid point, corresponds to pre-reset PTPCLKVAL */ 1607 t12 = t1 + (t2 - t1) / 2; 1608 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ 1609 t34 = t3 + (t4 - t3) / 2; 1610 /* Advance PTPCLKVAL by the time it took 
since its readout */ 1611 now += (t34 - t12); 1612 1613 __sja1105_ptp_adjtime(ds, now); 1614 1615 out_unlock_ptp: 1616 mutex_unlock(&priv->ptp_data.lock); 1617 1618 dev_info(priv->ds->dev, 1619 "Reset switch and programmed static config. Reason: %s\n", 1620 sja1105_reset_reasons[reason]); 1621 1622 /* Configure the CGU (PLLs) for MII and RMII PHYs. 1623 * For these interfaces there is no dynamic configuration 1624 * needed, since PLLs have same settings at all speeds. 1625 */ 1626 rc = sja1105_clocking_setup(priv); 1627 if (rc < 0) 1628 goto out; 1629 1630 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1631 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]); 1632 if (rc < 0) 1633 goto out; 1634 } 1635 1636 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) { 1637 bool an_enabled = !!(bmcr & BMCR_ANENABLE); 1638 1639 sja1105_sgmii_pcs_config(priv, an_enabled, false); 1640 1641 if (!an_enabled) { 1642 int speed = SPEED_UNKNOWN; 1643 1644 if (bmcr & BMCR_SPEED1000) 1645 speed = SPEED_1000; 1646 else if (bmcr & BMCR_SPEED100) 1647 speed = SPEED_100; 1648 else if (bmcr & BMCR_SPEED10) 1649 speed = SPEED_10; 1650 1651 sja1105_sgmii_pcs_force_speed(priv, speed); 1652 } 1653 } 1654 out: 1655 mutex_unlock(&priv->mgmt_lock); 1656 1657 return rc; 1658 } 1659 1660 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) 1661 { 1662 struct sja1105_mac_config_entry *mac; 1663 1664 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1665 1666 mac[port].vlanid = pvid; 1667 1668 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1669 &mac[port], true); 1670 } 1671 1672 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) 1673 { 1674 struct sja1105_vlan_lookup_entry *vlan; 1675 int count, i; 1676 1677 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; 1678 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; 1679 1680 for (i = 0; i < count; i++) 1681 if (vlan[i].vlanid == vid) 
1682 return i; 1683 1684 /* Return an invalid entry index if not found */ 1685 return -1; 1686 } 1687 1688 static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid, 1689 bool enabled, bool untagged) 1690 { 1691 struct sja1105_vlan_lookup_entry *vlan; 1692 struct sja1105_table *table; 1693 bool keep = true; 1694 int match, rc; 1695 1696 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 1697 1698 match = sja1105_is_vlan_configured(priv, vid); 1699 if (match < 0) { 1700 /* Can't delete a missing entry. */ 1701 if (!enabled) 1702 return 0; 1703 rc = sja1105_table_resize(table, table->entry_count + 1); 1704 if (rc) 1705 return rc; 1706 match = table->entry_count - 1; 1707 } 1708 /* Assign pointer after the resize (it's new memory) */ 1709 vlan = table->entries; 1710 vlan[match].vlanid = vid; 1711 if (enabled) { 1712 vlan[match].vlan_bc |= BIT(port); 1713 vlan[match].vmemb_port |= BIT(port); 1714 } else { 1715 vlan[match].vlan_bc &= ~BIT(port); 1716 vlan[match].vmemb_port &= ~BIT(port); 1717 } 1718 /* Also unset tag_port if removing this VLAN was requested, 1719 * just so we don't have a confusing bitmap (no practical purpose). 1720 */ 1721 if (untagged || !enabled) 1722 vlan[match].tag_port &= ~BIT(port); 1723 else 1724 vlan[match].tag_port |= BIT(port); 1725 /* If there's no port left as member of this VLAN, 1726 * it's time for it to go. 
1727 */ 1728 if (!vlan[match].vmemb_port) 1729 keep = false; 1730 1731 dev_dbg(priv->ds->dev, 1732 "%s: port %d, vid %llu, broadcast domain 0x%llx, " 1733 "port members 0x%llx, tagged ports 0x%llx, keep %d\n", 1734 __func__, port, vlan[match].vlanid, vlan[match].vlan_bc, 1735 vlan[match].vmemb_port, vlan[match].tag_port, keep); 1736 1737 rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, 1738 &vlan[match], keep); 1739 if (rc < 0) 1740 return rc; 1741 1742 if (!keep) 1743 return sja1105_table_delete_entry(table, match); 1744 1745 return 0; 1746 } 1747 1748 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled) 1749 { 1750 int rc, i; 1751 1752 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1753 rc = dsa_port_setup_8021q_tagging(ds, i, enabled); 1754 if (rc < 0) { 1755 dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n", 1756 i, rc); 1757 return rc; 1758 } 1759 } 1760 dev_info(ds->dev, "%s switch tagging\n", 1761 enabled ? "Enabled" : "Disabled"); 1762 return 0; 1763 } 1764 1765 static enum dsa_tag_protocol 1766 sja1105_get_tag_protocol(struct dsa_switch *ds, int port, 1767 enum dsa_tag_protocol mp) 1768 { 1769 return DSA_TAG_PROTO_SJA1105; 1770 } 1771 1772 /* This callback needs to be present */ 1773 static int sja1105_vlan_prepare(struct dsa_switch *ds, int port, 1774 const struct switchdev_obj_port_vlan *vlan) 1775 { 1776 return 0; 1777 } 1778 1779 /* The TPID setting belongs to the General Parameters table, 1780 * which can only be partially reconfigured at runtime (and not the TPID). 1781 * So a switch reset is required. 1782 */ 1783 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) 1784 { 1785 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 1786 struct sja1105_general_params_entry *general_params; 1787 struct sja1105_private *priv = ds->priv; 1788 struct sja1105_table *table; 1789 u16 tpid, tpid2; 1790 int rc; 1791 1792 if (enabled) { 1793 /* Enable VLAN filtering. 
*/ 1794 tpid = ETH_P_8021Q; 1795 tpid2 = ETH_P_8021AD; 1796 } else { 1797 /* Disable VLAN filtering. */ 1798 tpid = ETH_P_SJA1105; 1799 tpid2 = ETH_P_SJA1105; 1800 } 1801 1802 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 1803 general_params = table->entries; 1804 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ 1805 general_params->tpid = tpid; 1806 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ 1807 general_params->tpid2 = tpid2; 1808 /* When VLAN filtering is on, we need to at least be able to 1809 * decode management traffic through the "backup plan". 1810 */ 1811 general_params->incl_srcpt1 = enabled; 1812 general_params->incl_srcpt0 = enabled; 1813 1814 /* VLAN filtering => independent VLAN learning. 1815 * No VLAN filtering => shared VLAN learning. 1816 * 1817 * In shared VLAN learning mode, untagged traffic still gets 1818 * pvid-tagged, and the FDB table gets populated with entries 1819 * containing the "real" (pvid or from VLAN tag) VLAN ID. 1820 * However the switch performs a masked L2 lookup in the FDB, 1821 * effectively only looking up a frame's DMAC (and not VID) for the 1822 * forwarding decision. 1823 * 1824 * This is extremely convenient for us, because in modes with 1825 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into 1826 * each front panel port. This is good for identification but breaks 1827 * learning badly - the VID of the learnt FDB entry is unique, aka 1828 * no frames coming from any other port are going to have it. So 1829 * for forwarding purposes, this is as though learning was broken 1830 * (all frames get flooded). 
1831 */ 1832 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 1833 l2_lookup_params = table->entries; 1834 l2_lookup_params->shared_learn = !enabled; 1835 1836 rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); 1837 if (rc) 1838 dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); 1839 1840 /* Switch port identification based on 802.1Q is only passable 1841 * if we are not under a vlan_filtering bridge. So make sure 1842 * the two configurations are mutually exclusive. 1843 */ 1844 return sja1105_setup_8021q_tagging(ds, !enabled); 1845 } 1846 1847 static void sja1105_vlan_add(struct dsa_switch *ds, int port, 1848 const struct switchdev_obj_port_vlan *vlan) 1849 { 1850 struct sja1105_private *priv = ds->priv; 1851 u16 vid; 1852 int rc; 1853 1854 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1855 rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags & 1856 BRIDGE_VLAN_INFO_UNTAGGED); 1857 if (rc < 0) { 1858 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 1859 vid, port, rc); 1860 return; 1861 } 1862 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { 1863 rc = sja1105_pvid_apply(ds->priv, port, vid); 1864 if (rc < 0) { 1865 dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n", 1866 vid, port, rc); 1867 return; 1868 } 1869 } 1870 } 1871 } 1872 1873 static int sja1105_vlan_del(struct dsa_switch *ds, int port, 1874 const struct switchdev_obj_port_vlan *vlan) 1875 { 1876 struct sja1105_private *priv = ds->priv; 1877 u16 vid; 1878 int rc; 1879 1880 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1881 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags & 1882 BRIDGE_VLAN_INFO_UNTAGGED); 1883 if (rc < 0) { 1884 dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n", 1885 vid, port, rc); 1886 return rc; 1887 } 1888 } 1889 return 0; 1890 } 1891 1892 /* The programming model for the SJA1105 switch is "all-at-once" via static 1893 * configuration tables. 
Some of these can be dynamically modified at runtime, 1894 * but not the xMII mode parameters table. 1895 * Furthermode, some PHYs may not have crystals for generating their clocks 1896 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's 1897 * ref_clk pin. So port clocking needs to be initialized early, before 1898 * connecting to PHYs is attempted, otherwise they won't respond through MDIO. 1899 * Setting correct PHY link speed does not matter now. 1900 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY 1901 * bindings are not yet parsed by DSA core. We need to parse early so that we 1902 * can populate the xMII mode parameters table. 1903 */ 1904 static int sja1105_setup(struct dsa_switch *ds) 1905 { 1906 struct sja1105_dt_port ports[SJA1105_NUM_PORTS]; 1907 struct sja1105_private *priv = ds->priv; 1908 int rc; 1909 1910 rc = sja1105_parse_dt(priv, ports); 1911 if (rc < 0) { 1912 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); 1913 return rc; 1914 } 1915 1916 /* Error out early if internal delays are required through DT 1917 * and we can't apply them. 1918 */ 1919 rc = sja1105_parse_rgmii_delays(priv, ports); 1920 if (rc < 0) { 1921 dev_err(ds->dev, "RGMII delay not supported\n"); 1922 return rc; 1923 } 1924 1925 rc = sja1105_ptp_clock_register(ds); 1926 if (rc < 0) { 1927 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); 1928 return rc; 1929 } 1930 /* Create and send configuration down to device */ 1931 rc = sja1105_static_config_load(priv, ports); 1932 if (rc < 0) { 1933 dev_err(ds->dev, "Failed to load static config: %d\n", rc); 1934 return rc; 1935 } 1936 /* Configure the CGU (PHY link modes and speeds) */ 1937 rc = sja1105_clocking_setup(priv); 1938 if (rc < 0) { 1939 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc); 1940 return rc; 1941 } 1942 /* On SJA1105, VLAN filtering per se is always enabled in hardware. 
1943 * The only thing we can do to disable it is lie about what the 802.1Q 1944 * EtherType is. 1945 * So it will still try to apply VLAN filtering, but all ingress 1946 * traffic (except frames received with EtherType of ETH_P_SJA1105) 1947 * will be internally tagged with a distorted VLAN header where the 1948 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. 1949 */ 1950 ds->vlan_filtering_is_global = true; 1951 1952 /* Advertise the 8 egress queues */ 1953 ds->num_tx_queues = SJA1105_NUM_TC; 1954 1955 /* The DSA/switchdev model brings up switch ports in standalone mode by 1956 * default, and that means vlan_filtering is 0 since they're not under 1957 * a bridge, so it's safe to set up switch tagging at this time. 1958 */ 1959 return sja1105_setup_8021q_tagging(ds, true); 1960 } 1961 1962 static void sja1105_teardown(struct dsa_switch *ds) 1963 { 1964 struct sja1105_private *priv = ds->priv; 1965 int port; 1966 1967 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 1968 struct sja1105_port *sp = &priv->ports[port]; 1969 1970 if (!dsa_is_user_port(ds, port)) 1971 continue; 1972 1973 if (sp->xmit_worker) 1974 kthread_destroy_worker(sp->xmit_worker); 1975 } 1976 1977 sja1105_tas_teardown(ds); 1978 sja1105_ptp_clock_unregister(ds); 1979 sja1105_static_config_free(&priv->static_config); 1980 } 1981 1982 static int sja1105_port_enable(struct dsa_switch *ds, int port, 1983 struct phy_device *phy) 1984 { 1985 struct net_device *slave; 1986 1987 if (!dsa_is_user_port(ds, port)) 1988 return 0; 1989 1990 slave = dsa_to_port(ds, port)->slave; 1991 1992 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 1993 1994 return 0; 1995 } 1996 1997 static void sja1105_port_disable(struct dsa_switch *ds, int port) 1998 { 1999 struct sja1105_private *priv = ds->priv; 2000 struct sja1105_port *sp = &priv->ports[port]; 2001 2002 if (!dsa_is_user_port(ds, port)) 2003 return; 2004 2005 kthread_cancel_work_sync(&sp->xmit_work); 2006 skb_queue_purge(&sp->xmit_queue); 2007 } 2008 2009 
/* Transmit one link-local (management) frame through a one-shot management
 * route installed in hardware slot @slot, steering it out of @port. When
 * @takets is set, the switch is asked to take a TX timestamp for the frame.
 * Consumes @skb. Always returns NETDEV_TX_OK (errors are logged, not
 * propagated to the stack).
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	/* Match on the frame's destination MAC; force egress on @port only */
	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

/* Recover the sja1105_port from its embedded kthread_work */
#define work_to_port(work) \
	container_of((work), struct sja1105_port, xmit_work)
/* Recover the sja1105_private from its embedded tagger_data */
#define tagger_to_sja1105(t) \
	container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	/* Drain everything the tagger queued for this port */
	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;

		/* Serialize use of the management route slots across ports */
		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}

/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
2101 */ 2102 static int sja1105_set_ageing_time(struct dsa_switch *ds, 2103 unsigned int ageing_time) 2104 { 2105 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 2106 struct sja1105_private *priv = ds->priv; 2107 struct sja1105_table *table; 2108 unsigned int maxage; 2109 2110 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 2111 l2_lookup_params = table->entries; 2112 2113 maxage = SJA1105_AGEING_TIME_MS(ageing_time); 2114 2115 if (l2_lookup_params->maxage == maxage) 2116 return 0; 2117 2118 l2_lookup_params->maxage = maxage; 2119 2120 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME); 2121 } 2122 2123 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, 2124 enum tc_setup_type type, 2125 void *type_data) 2126 { 2127 switch (type) { 2128 case TC_SETUP_QDISC_TAPRIO: 2129 return sja1105_setup_tc_taprio(ds, port, type_data); 2130 default: 2131 return -EOPNOTSUPP; 2132 } 2133 } 2134 2135 /* We have a single mirror (@to) port, but can configure ingress and egress 2136 * mirroring on all other (@from) ports. 2137 * We need to allow mirroring rules only as long as the @to port is always the 2138 * same, and we need to unset the @to port from mirr_port only when there is no 2139 * mirroring rule that references it. 
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* mirr_port == SJA1105_NUM_PORTS (out of range) encodes "no mirror
	 * port configured"
	 */
	already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Anybody still referencing mirr_port? */
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}
		/* Unset already_enabled for next time */
		if (!keep)
			new_mirr_port = SJA1105_NUM_PORTS;
	}
	/* Commit the new mirror port to hardware only if it changed */
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	/* Flag the @from port as (no longer) mirrored in this direction */
	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}

/* tc-matchall mirror offload: start mirroring @port to mirror->to_local_port */
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
			      struct dsa_mall_mirror_tc_entry *mirror,
			      bool ingress)
{
	return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
				    ingress, true);
}

static void sja1105_mirror_del(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror)
{
	/* Errors are not propagated; the DSA del hook returns void */
	sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
			     mirror->ingress, false);
}

/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol = sja1105_get_tag_protocol,
	.setup = sja1105_setup,
	.teardown = sja1105_teardown,
	.set_ageing_time = sja1105_set_ageing_time,
	.phylink_validate = sja1105_phylink_validate,
	.phylink_mac_link_state = sja1105_mac_pcs_get_state,
	.phylink_mac_config = sja1105_mac_config,
	.phylink_mac_link_up = sja1105_mac_link_up,
	.phylink_mac_link_down = sja1105_mac_link_down,
	.get_strings = sja1105_get_strings,
	.get_ethtool_stats = sja1105_get_ethtool_stats,
	.get_sset_count = sja1105_get_sset_count,
	.get_ts_info = sja1105_get_ts_info,
	.port_enable = sja1105_port_enable,
	.port_disable = sja1105_port_disable,
	.port_fdb_dump = sja1105_fdb_dump,
	.port_fdb_add = sja1105_fdb_add,
	.port_fdb_del = sja1105_fdb_del,
	.port_bridge_join = sja1105_bridge_join,
	.port_bridge_leave = sja1105_bridge_leave,
	.port_stp_state_set = sja1105_bridge_stp_state_set,
	.port_vlan_prepare = sja1105_vlan_prepare,
	.port_vlan_filtering = sja1105_vlan_filtering,
	.port_vlan_add = sja1105_vlan_add,
	.port_vlan_del = sja1105_vlan_del,
	.port_mdb_prepare = sja1105_mdb_prepare,
	.port_mdb_add = sja1105_mdb_add,
	.port_mdb_del = sja1105_mdb_del,
	.port_hwtstamp_get = sja1105_hwtstamp_get,
	.port_hwtstamp_set = sja1105_hwtstamp_set,
	.port_rxtstamp = sja1105_port_rxtstamp,
	.port_txtstamp = sja1105_port_txtstamp,
	.port_setup_tc = sja1105_port_setup_tc,
	.port_mirror_add = sja1105_mirror_add,
	.port_mirror_del = sja1105_mirror_del,
};

/* Read the DEVICE_ID and PROD_ID registers over SPI and verify they match
 * what the device tree compatible string claims we are driving.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct
device *dev = &priv->spidev->dev; 2256 u32 device_id; 2257 u64 part_no; 2258 int rc; 2259 2260 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, 2261 NULL); 2262 if (rc < 0) 2263 return rc; 2264 2265 if (device_id != priv->info->device_id) { 2266 dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n", 2267 priv->info->device_id, device_id); 2268 return -ENODEV; 2269 } 2270 2271 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, 2272 SJA1105_SIZE_DEVICE_ID); 2273 if (rc < 0) 2274 return rc; 2275 2276 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); 2277 2278 if (part_no != priv->info->part_no) { 2279 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", 2280 priv->info->part_no, part_no); 2281 return -ENODEV; 2282 } 2283 2284 return 0; 2285 } 2286 2287 static int sja1105_probe(struct spi_device *spi) 2288 { 2289 struct sja1105_tagger_data *tagger_data; 2290 struct device *dev = &spi->dev; 2291 struct sja1105_private *priv; 2292 struct dsa_switch *ds; 2293 int rc, port; 2294 2295 if (!dev->of_node) { 2296 dev_err(dev, "No DTS bindings for SJA1105 driver\n"); 2297 return -EINVAL; 2298 } 2299 2300 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); 2301 if (!priv) 2302 return -ENOMEM; 2303 2304 /* Configure the optional reset pin and bring up switch */ 2305 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 2306 if (IS_ERR(priv->reset_gpio)) 2307 dev_dbg(dev, "reset-gpios not defined, ignoring\n"); 2308 else 2309 sja1105_hw_reset(priv->reset_gpio, 1, 1); 2310 2311 /* Populate our driver private structure (priv) based on 2312 * the device tree node that was probed (spi) 2313 */ 2314 priv->spidev = spi; 2315 spi_set_drvdata(spi, priv); 2316 2317 /* Configure the SPI bus */ 2318 spi->bits_per_word = 8; 2319 rc = spi_setup(spi); 2320 if (rc < 0) { 2321 dev_err(dev, "Could not init SPI\n"); 2322 return rc; 2323 } 2324 2325 priv->info = of_device_get_match_data(dev); 2326 2327 /* Detect 
hardware device */ 2328 rc = sja1105_check_device_id(priv); 2329 if (rc < 0) { 2330 dev_err(dev, "Device ID check failed: %d\n", rc); 2331 return rc; 2332 } 2333 2334 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); 2335 2336 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); 2337 if (!ds) 2338 return -ENOMEM; 2339 2340 ds->dev = dev; 2341 ds->num_ports = SJA1105_NUM_PORTS; 2342 ds->ops = &sja1105_switch_ops; 2343 ds->priv = priv; 2344 priv->ds = ds; 2345 2346 tagger_data = &priv->tagger_data; 2347 2348 mutex_init(&priv->ptp_data.lock); 2349 mutex_init(&priv->mgmt_lock); 2350 2351 sja1105_tas_setup(ds); 2352 2353 rc = dsa_register_switch(priv->ds); 2354 if (rc) 2355 return rc; 2356 2357 /* Connections between dsa_port and sja1105_port */ 2358 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2359 struct sja1105_port *sp = &priv->ports[port]; 2360 struct dsa_port *dp = dsa_to_port(ds, port); 2361 struct net_device *slave; 2362 2363 if (!dsa_is_user_port(ds, port)) 2364 continue; 2365 2366 dp->priv = sp; 2367 sp->dp = dp; 2368 sp->data = tagger_data; 2369 slave = dp->slave; 2370 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); 2371 sp->xmit_worker = kthread_create_worker(0, "%s_xmit", 2372 slave->name); 2373 if (IS_ERR(sp->xmit_worker)) { 2374 rc = PTR_ERR(sp->xmit_worker); 2375 dev_err(ds->dev, 2376 "failed to create deferred xmit thread: %d\n", 2377 rc); 2378 goto out; 2379 } 2380 skb_queue_head_init(&sp->xmit_queue); 2381 } 2382 2383 return 0; 2384 out: 2385 while (port-- > 0) { 2386 struct sja1105_port *sp = &priv->ports[port]; 2387 2388 if (!dsa_is_user_port(ds, port)) 2389 continue; 2390 2391 kthread_destroy_worker(sp->xmit_worker); 2392 } 2393 return rc; 2394 } 2395 2396 static int sja1105_remove(struct spi_device *spi) 2397 { 2398 struct sja1105_private *priv = spi_get_drvdata(spi); 2399 2400 dsa_unregister_switch(priv->ds); 2401 return 0; 2402 } 2403 2404 static const struct of_device_id sja1105_dt_ids[] = { 2405 { .compatible = 
"nxp,sja1105e", .data = &sja1105e_info }, 2406 { .compatible = "nxp,sja1105t", .data = &sja1105t_info }, 2407 { .compatible = "nxp,sja1105p", .data = &sja1105p_info }, 2408 { .compatible = "nxp,sja1105q", .data = &sja1105q_info }, 2409 { .compatible = "nxp,sja1105r", .data = &sja1105r_info }, 2410 { .compatible = "nxp,sja1105s", .data = &sja1105s_info }, 2411 { /* sentinel */ }, 2412 }; 2413 MODULE_DEVICE_TABLE(of, sja1105_dt_ids); 2414 2415 static struct spi_driver sja1105_driver = { 2416 .driver = { 2417 .name = "sja1105", 2418 .owner = THIS_MODULE, 2419 .of_match_table = of_match_ptr(sja1105_dt_ids), 2420 }, 2421 .probe = sja1105_probe, 2422 .remove = sja1105_remove, 2423 }; 2424 2425 module_spi_driver(sja1105_driver); 2426 2427 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>"); 2428 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>"); 2429 MODULE_DESCRIPTION("SJA1105 Driver"); 2430 MODULE_LICENSE("GPL v2"); 2431