1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/delay.h> 9 #include <linux/module.h> 10 #include <linux/printk.h> 11 #include <linux/spi/spi.h> 12 #include <linux/errno.h> 13 #include <linux/gpio/consumer.h> 14 #include <linux/phylink.h> 15 #include <linux/of.h> 16 #include <linux/of_net.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_device.h> 19 #include <linux/netdev_features.h> 20 #include <linux/netdevice.h> 21 #include <linux/if_bridge.h> 22 #include <linux/if_ether.h> 23 #include <linux/dsa/8021q.h> 24 #include "sja1105.h" 25 #include "sja1105_tas.h" 26 27 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, 28 unsigned int startup_delay) 29 { 30 gpiod_set_value_cansleep(gpio, 1); 31 /* Wait for minimum reset pulse length */ 32 msleep(pulse_len); 33 gpiod_set_value_cansleep(gpio, 0); 34 /* Wait until chip is ready after reset */ 35 msleep(startup_delay); 36 } 37 38 static void 39 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, 40 int from, int to, bool allow) 41 { 42 if (allow) { 43 l2_fwd[from].bc_domain |= BIT(to); 44 l2_fwd[from].reach_port |= BIT(to); 45 l2_fwd[from].fl_domain |= BIT(to); 46 } else { 47 l2_fwd[from].bc_domain &= ~BIT(to); 48 l2_fwd[from].reach_port &= ~BIT(to); 49 l2_fwd[from].fl_domain &= ~BIT(to); 50 } 51 } 52 53 /* Structure used to temporarily transport device tree 54 * settings into sja1105_setup 55 */ 56 struct sja1105_dt_port { 57 phy_interface_t phy_mode; 58 sja1105_mii_role_t role; 59 }; 60 61 static int sja1105_init_mac_settings(struct sja1105_private *priv) 62 { 63 struct sja1105_mac_config_entry default_mac = { 64 /* Enable all 8 priority queues on egress. 65 * Every queue i holds top[i] - base[i] frames. 66 * Sum of top[i] - base[i] is 511 (max hardware limit). 
67 */ 68 .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, 69 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, 70 .enabled = {true, true, true, true, true, true, true, true}, 71 /* Keep standard IFG of 12 bytes on egress. */ 72 .ifg = 0, 73 /* Always put the MAC speed in automatic mode, where it can be 74 * adjusted at runtime by PHYLINK. 75 */ 76 .speed = SJA1105_SPEED_AUTO, 77 /* No static correction for 1-step 1588 events */ 78 .tp_delin = 0, 79 .tp_delout = 0, 80 /* Disable aging for critical TTEthernet traffic */ 81 .maxage = 0xFF, 82 /* Internal VLAN (pvid) to apply to untagged ingress */ 83 .vlanprio = 0, 84 .vlanid = 1, 85 .ing_mirr = false, 86 .egr_mirr = false, 87 /* Don't drop traffic with other EtherType than ETH_P_IP */ 88 .drpnona664 = false, 89 /* Don't drop double-tagged traffic */ 90 .drpdtag = false, 91 /* Don't drop untagged traffic */ 92 .drpuntag = false, 93 /* Don't retag 802.1p (VID 0) traffic with the pvid */ 94 .retag = false, 95 /* Disable learning and I/O on user ports by default - 96 * STP will enable it. 97 */ 98 .dyn_learn = false, 99 .egress = false, 100 .ingress = false, 101 }; 102 struct sja1105_mac_config_entry *mac; 103 struct sja1105_table *table; 104 int i; 105 106 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; 107 108 /* Discard previous MAC Configuration Table */ 109 if (table->entry_count) { 110 kfree(table->entries); 111 table->entry_count = 0; 112 } 113 114 table->entries = kcalloc(SJA1105_NUM_PORTS, 115 table->ops->unpacked_entry_size, GFP_KERNEL); 116 if (!table->entries) 117 return -ENOMEM; 118 119 table->entry_count = SJA1105_NUM_PORTS; 120 121 mac = table->entries; 122 123 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 124 mac[i] = default_mac; 125 if (i == dsa_upstream_port(priv->ds, i)) { 126 /* STP doesn't get called for CPU port, so we need to 127 * set the I/O parameters statically. 
128 */ 129 mac[i].dyn_learn = true; 130 mac[i].ingress = true; 131 mac[i].egress = true; 132 } 133 } 134 135 return 0; 136 } 137 138 static int sja1105_init_mii_settings(struct sja1105_private *priv, 139 struct sja1105_dt_port *ports) 140 { 141 struct device *dev = &priv->spidev->dev; 142 struct sja1105_xmii_params_entry *mii; 143 struct sja1105_table *table; 144 int i; 145 146 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; 147 148 /* Discard previous xMII Mode Parameters Table */ 149 if (table->entry_count) { 150 kfree(table->entries); 151 table->entry_count = 0; 152 } 153 154 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, 155 table->ops->unpacked_entry_size, GFP_KERNEL); 156 if (!table->entries) 157 return -ENOMEM; 158 159 /* Override table based on PHYLINK DT bindings */ 160 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; 161 162 mii = table->entries; 163 164 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 165 switch (ports[i].phy_mode) { 166 case PHY_INTERFACE_MODE_MII: 167 mii->xmii_mode[i] = XMII_MODE_MII; 168 break; 169 case PHY_INTERFACE_MODE_RMII: 170 mii->xmii_mode[i] = XMII_MODE_RMII; 171 break; 172 case PHY_INTERFACE_MODE_RGMII: 173 case PHY_INTERFACE_MODE_RGMII_ID: 174 case PHY_INTERFACE_MODE_RGMII_RXID: 175 case PHY_INTERFACE_MODE_RGMII_TXID: 176 mii->xmii_mode[i] = XMII_MODE_RGMII; 177 break; 178 default: 179 dev_err(dev, "Unsupported PHY mode %s!\n", 180 phy_modes(ports[i].phy_mode)); 181 } 182 183 mii->phy_mac[i] = ports[i].role; 184 } 185 return 0; 186 } 187 188 static int sja1105_init_static_fdb(struct sja1105_private *priv) 189 { 190 struct sja1105_table *table; 191 192 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 193 194 /* We only populate the FDB table through dynamic 195 * L2 Address Lookup entries 196 */ 197 if (table->entry_count) { 198 kfree(table->entries); 199 table->entry_count = 0; 200 } 201 return 0; 202 } 203 204 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) 205 { 206 struct 
sja1105_table *table; 207 u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS; 208 struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { 209 /* Learned FDB entries are forgotten after 300 seconds */ 210 .maxage = SJA1105_AGEING_TIME_MS(300000), 211 /* All entries within a FDB bin are available for learning */ 212 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, 213 /* And the P/Q/R/S equivalent setting: */ 214 .start_dynspc = 0, 215 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries, 216 max_fdb_entries, max_fdb_entries, }, 217 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ 218 .poly = 0x97, 219 /* This selects between Independent VLAN Learning (IVL) and 220 * Shared VLAN Learning (SVL) 221 */ 222 .shared_learn = true, 223 /* Don't discard management traffic based on ENFPORT - 224 * we don't perform SMAC port enforcement anyway, so 225 * what we are setting here doesn't matter. 226 */ 227 .no_enf_hostprt = false, 228 /* Don't learn SMAC for mac_fltres1 and mac_fltres0. 229 * Maybe correlate with no_linklocal_learn from bridge driver? 
230 */ 231 .no_mgmt_learn = true, 232 /* P/Q/R/S only */ 233 .use_static = true, 234 /* Dynamically learned FDB entries can overwrite other (older) 235 * dynamic FDB entries 236 */ 237 .owr_dyn = true, 238 .drpnolearn = true, 239 }; 240 241 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 242 243 if (table->entry_count) { 244 kfree(table->entries); 245 table->entry_count = 0; 246 } 247 248 table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, 249 table->ops->unpacked_entry_size, GFP_KERNEL); 250 if (!table->entries) 251 return -ENOMEM; 252 253 table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; 254 255 /* This table only has a single entry */ 256 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = 257 default_l2_lookup_params; 258 259 return 0; 260 } 261 262 static int sja1105_init_static_vlan(struct sja1105_private *priv) 263 { 264 struct sja1105_table *table; 265 struct sja1105_vlan_lookup_entry pvid = { 266 .ving_mirr = 0, 267 .vegr_mirr = 0, 268 .vmemb_port = 0, 269 .vlan_bc = 0, 270 .tag_port = 0, 271 .vlanid = 1, 272 }; 273 int i; 274 275 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; 276 277 /* The static VLAN table will only contain the initial pvid of 1. 278 * All other VLANs are to be configured through dynamic entries, 279 * and kept in the static configuration table as backing memory. 280 */ 281 if (table->entry_count) { 282 kfree(table->entries); 283 table->entry_count = 0; 284 } 285 286 table->entries = kcalloc(1, table->ops->unpacked_entry_size, 287 GFP_KERNEL); 288 if (!table->entries) 289 return -ENOMEM; 290 291 table->entry_count = 1; 292 293 /* VLAN 1: all DT-defined ports are members; no restrictions on 294 * forwarding; always transmit priority-tagged frames as untagged. 
295 */ 296 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 297 pvid.vmemb_port |= BIT(i); 298 pvid.vlan_bc |= BIT(i); 299 pvid.tag_port &= ~BIT(i); 300 } 301 302 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; 303 return 0; 304 } 305 306 static int sja1105_init_l2_forwarding(struct sja1105_private *priv) 307 { 308 struct sja1105_l2_forwarding_entry *l2fwd; 309 struct sja1105_table *table; 310 int i, j; 311 312 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; 313 314 if (table->entry_count) { 315 kfree(table->entries); 316 table->entry_count = 0; 317 } 318 319 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, 320 table->ops->unpacked_entry_size, GFP_KERNEL); 321 if (!table->entries) 322 return -ENOMEM; 323 324 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; 325 326 l2fwd = table->entries; 327 328 /* First 5 entries define the forwarding rules */ 329 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 330 unsigned int upstream = dsa_upstream_port(priv->ds, i); 331 332 for (j = 0; j < SJA1105_NUM_TC; j++) 333 l2fwd[i].vlan_pmap[j] = j; 334 335 if (i == upstream) 336 continue; 337 338 sja1105_port_allow_traffic(l2fwd, i, upstream, true); 339 sja1105_port_allow_traffic(l2fwd, upstream, i, true); 340 } 341 /* Next 8 entries define VLAN PCP mapping from ingress to egress. 342 * Create a one-to-one mapping. 
343 */ 344 for (i = 0; i < SJA1105_NUM_TC; i++) 345 for (j = 0; j < SJA1105_NUM_PORTS; j++) 346 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; 347 348 return 0; 349 } 350 351 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) 352 { 353 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { 354 /* Disallow dynamic reconfiguration of vlan_pmap */ 355 .max_dynp = 0, 356 /* Use a single memory partition for all ingress queues */ 357 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, 358 }; 359 struct sja1105_table *table; 360 361 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; 362 363 if (table->entry_count) { 364 kfree(table->entries); 365 table->entry_count = 0; 366 } 367 368 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, 369 table->ops->unpacked_entry_size, GFP_KERNEL); 370 if (!table->entries) 371 return -ENOMEM; 372 373 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; 374 375 /* This table only has a single entry */ 376 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = 377 default_l2fwd_params; 378 379 return 0; 380 } 381 382 static int sja1105_init_general_params(struct sja1105_private *priv) 383 { 384 struct sja1105_general_params_entry default_general_params = { 385 /* Disallow dynamic changing of the mirror port */ 386 .mirr_ptacu = 0, 387 .switchid = priv->ds->index, 388 /* Priority queue for link-local management frames 389 * (both ingress to and egress from CPU - PTP, STP etc) 390 */ 391 .hostprio = 7, 392 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, 393 .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, 394 .incl_srcpt1 = false, 395 .send_meta1 = false, 396 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, 397 .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, 398 .incl_srcpt0 = false, 399 .send_meta0 = false, 400 /* The destination for traffic matching mac_fltres1 and 401 * mac_fltres0 on all ports except host_port. 
Such traffic 402 * receieved on host_port itself would be dropped, except 403 * by installing a temporary 'management route' 404 */ 405 .host_port = dsa_upstream_port(priv->ds, 0), 406 /* Same as host port */ 407 .mirr_port = dsa_upstream_port(priv->ds, 0), 408 /* Link-local traffic received on casc_port will be forwarded 409 * to host_port without embedding the source port and device ID 410 * info in the destination MAC address (presumably because it 411 * is a cascaded port and a downstream SJA switch already did 412 * that). Default to an invalid port (to disable the feature) 413 * and overwrite this if we find any DSA (cascaded) ports. 414 */ 415 .casc_port = SJA1105_NUM_PORTS, 416 /* No TTEthernet */ 417 .vllupformat = 0, 418 .vlmarker = 0, 419 .vlmask = 0, 420 /* Only update correctionField for 1-step PTP (L2 transport) */ 421 .ignore2stf = 0, 422 /* Forcefully disable VLAN filtering by telling 423 * the switch that VLAN has a different EtherType. 424 */ 425 .tpid = ETH_P_SJA1105, 426 .tpid2 = ETH_P_SJA1105, 427 }; 428 struct sja1105_table *table; 429 int i, k = 0; 430 431 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 432 if (dsa_is_dsa_port(priv->ds, i)) 433 default_general_params.casc_port = i; 434 else if (dsa_is_user_port(priv->ds, i)) 435 priv->ports[i].mgmt_slot = k++; 436 } 437 438 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 439 440 if (table->entry_count) { 441 kfree(table->entries); 442 table->entry_count = 0; 443 } 444 445 table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, 446 table->ops->unpacked_entry_size, GFP_KERNEL); 447 if (!table->entries) 448 return -ENOMEM; 449 450 table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; 451 452 /* This table only has a single entry */ 453 ((struct sja1105_general_params_entry *)table->entries)[0] = 454 default_general_params; 455 456 return 0; 457 } 458 459 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) 460 461 static inline void 462 sja1105_setup_policer(struct 
sja1105_l2_policing_entry *policing, 463 int index) 464 { 465 policing[index].sharindx = index; 466 policing[index].smax = 65535; /* Burst size in bytes */ 467 policing[index].rate = SJA1105_RATE_MBPS(1000); 468 policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 469 policing[index].partition = 0; 470 } 471 472 static int sja1105_init_l2_policing(struct sja1105_private *priv) 473 { 474 struct sja1105_l2_policing_entry *policing; 475 struct sja1105_table *table; 476 int i, j, k; 477 478 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; 479 480 /* Discard previous L2 Policing Table */ 481 if (table->entry_count) { 482 kfree(table->entries); 483 table->entry_count = 0; 484 } 485 486 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, 487 table->ops->unpacked_entry_size, GFP_KERNEL); 488 if (!table->entries) 489 return -ENOMEM; 490 491 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; 492 493 policing = table->entries; 494 495 /* k sweeps through all unicast policers (0-39). 496 * bcast sweeps through policers 40-44. 
497 */ 498 for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) { 499 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i; 500 501 for (j = 0; j < SJA1105_NUM_TC; j++, k++) 502 sja1105_setup_policer(policing, k); 503 504 /* Set up this port's policer for broadcast traffic */ 505 sja1105_setup_policer(policing, bcast); 506 } 507 return 0; 508 } 509 510 static int sja1105_init_avb_params(struct sja1105_private *priv, 511 bool on) 512 { 513 struct sja1105_avb_params_entry *avb; 514 struct sja1105_table *table; 515 516 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; 517 518 /* Discard previous AVB Parameters Table */ 519 if (table->entry_count) { 520 kfree(table->entries); 521 table->entry_count = 0; 522 } 523 524 /* Configure the reception of meta frames only if requested */ 525 if (!on) 526 return 0; 527 528 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, 529 table->ops->unpacked_entry_size, GFP_KERNEL); 530 if (!table->entries) 531 return -ENOMEM; 532 533 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; 534 535 avb = table->entries; 536 537 avb->destmeta = SJA1105_META_DMAC; 538 avb->srcmeta = SJA1105_META_SMAC; 539 540 return 0; 541 } 542 543 static int sja1105_static_config_load(struct sja1105_private *priv, 544 struct sja1105_dt_port *ports) 545 { 546 int rc; 547 548 sja1105_static_config_free(&priv->static_config); 549 rc = sja1105_static_config_init(&priv->static_config, 550 priv->info->static_ops, 551 priv->info->device_id); 552 if (rc) 553 return rc; 554 555 /* Build static configuration */ 556 rc = sja1105_init_mac_settings(priv); 557 if (rc < 0) 558 return rc; 559 rc = sja1105_init_mii_settings(priv, ports); 560 if (rc < 0) 561 return rc; 562 rc = sja1105_init_static_fdb(priv); 563 if (rc < 0) 564 return rc; 565 rc = sja1105_init_static_vlan(priv); 566 if (rc < 0) 567 return rc; 568 rc = sja1105_init_l2_lookup_params(priv); 569 if (rc < 0) 570 return rc; 571 rc = sja1105_init_l2_forwarding(priv); 572 if (rc < 0) 573 return rc; 574 rc = 
sja1105_init_l2_forwarding_params(priv); 575 if (rc < 0) 576 return rc; 577 rc = sja1105_init_l2_policing(priv); 578 if (rc < 0) 579 return rc; 580 rc = sja1105_init_general_params(priv); 581 if (rc < 0) 582 return rc; 583 rc = sja1105_init_avb_params(priv, false); 584 if (rc < 0) 585 return rc; 586 587 /* Send initial configuration to hardware via SPI */ 588 return sja1105_static_config_upload(priv); 589 } 590 591 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, 592 const struct sja1105_dt_port *ports) 593 { 594 int i; 595 596 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 597 if (ports->role == XMII_MAC) 598 continue; 599 600 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || 601 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 602 priv->rgmii_rx_delay[i] = true; 603 604 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || 605 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) 606 priv->rgmii_tx_delay[i] = true; 607 608 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && 609 !priv->info->setup_rgmii_delay) 610 return -EINVAL; 611 } 612 return 0; 613 } 614 615 static int sja1105_parse_ports_node(struct sja1105_private *priv, 616 struct sja1105_dt_port *ports, 617 struct device_node *ports_node) 618 { 619 struct device *dev = &priv->spidev->dev; 620 struct device_node *child; 621 622 for_each_child_of_node(ports_node, child) { 623 struct device_node *phy_node; 624 int phy_mode; 625 u32 index; 626 627 /* Get switch port number from DT */ 628 if (of_property_read_u32(child, "reg", &index) < 0) { 629 dev_err(dev, "Port number not defined in device tree " 630 "(property \"reg\")\n"); 631 of_node_put(child); 632 return -ENODEV; 633 } 634 635 /* Get PHY mode from DT */ 636 phy_mode = of_get_phy_mode(child); 637 if (phy_mode < 0) { 638 dev_err(dev, "Failed to read phy-mode or " 639 "phy-interface-type property for port %d\n", 640 index); 641 of_node_put(child); 642 return -ENODEV; 643 } 644 ports[index].phy_mode = phy_mode; 645 646 phy_node = 
of_parse_phandle(child, "phy-handle", 0); 647 if (!phy_node) { 648 if (!of_phy_is_fixed_link(child)) { 649 dev_err(dev, "phy-handle or fixed-link " 650 "properties missing!\n"); 651 of_node_put(child); 652 return -ENODEV; 653 } 654 /* phy-handle is missing, but fixed-link isn't. 655 * So it's a fixed link. Default to PHY role. 656 */ 657 ports[index].role = XMII_PHY; 658 } else { 659 /* phy-handle present => put port in MAC role */ 660 ports[index].role = XMII_MAC; 661 of_node_put(phy_node); 662 } 663 664 /* The MAC/PHY role can be overridden with explicit bindings */ 665 if (of_property_read_bool(child, "sja1105,role-mac")) 666 ports[index].role = XMII_MAC; 667 else if (of_property_read_bool(child, "sja1105,role-phy")) 668 ports[index].role = XMII_PHY; 669 } 670 671 return 0; 672 } 673 674 static int sja1105_parse_dt(struct sja1105_private *priv, 675 struct sja1105_dt_port *ports) 676 { 677 struct device *dev = &priv->spidev->dev; 678 struct device_node *switch_node = dev->of_node; 679 struct device_node *ports_node; 680 int rc; 681 682 ports_node = of_get_child_by_name(switch_node, "ports"); 683 if (!ports_node) { 684 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); 685 return -ENODEV; 686 } 687 688 rc = sja1105_parse_ports_node(priv, ports, ports_node); 689 of_node_put(ports_node); 690 691 return rc; 692 } 693 694 /* Convert link speed from SJA1105 to ethtool encoding */ 695 static int sja1105_speed[] = { 696 [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN, 697 [SJA1105_SPEED_10MBPS] = SPEED_10, 698 [SJA1105_SPEED_100MBPS] = SPEED_100, 699 [SJA1105_SPEED_1000MBPS] = SPEED_1000, 700 }; 701 702 /* Set link speed in the MAC configuration for a specific port. 
*/ 703 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, 704 int speed_mbps) 705 { 706 struct sja1105_xmii_params_entry *mii; 707 struct sja1105_mac_config_entry *mac; 708 struct device *dev = priv->ds->dev; 709 sja1105_phy_interface_t phy_mode; 710 sja1105_speed_t speed; 711 int rc; 712 713 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration 714 * tables. On E/T, MAC reconfig tables are not readable, only writable. 715 * We have to *know* what the MAC looks like. For the sake of keeping 716 * the code common, we'll use the static configuration tables as a 717 * reasonable approximation for both E/T and P/Q/R/S. 718 */ 719 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 720 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 721 722 switch (speed_mbps) { 723 case SPEED_UNKNOWN: 724 /* PHYLINK called sja1105_mac_config() to inform us about 725 * the state->interface, but AN has not completed and the 726 * speed is not yet valid. UM10944.pdf says that setting 727 * SJA1105_SPEED_AUTO at runtime disables the port, so that is 728 * ok for power consumption in case AN will never complete - 729 * otherwise PHYLINK should come back with a new update. 730 */ 731 speed = SJA1105_SPEED_AUTO; 732 break; 733 case SPEED_10: 734 speed = SJA1105_SPEED_10MBPS; 735 break; 736 case SPEED_100: 737 speed = SJA1105_SPEED_100MBPS; 738 break; 739 case SPEED_1000: 740 speed = SJA1105_SPEED_1000MBPS; 741 break; 742 default: 743 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); 744 return -EINVAL; 745 } 746 747 /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration 748 * table, since this will be used for the clocking setup, and we no 749 * longer need to store it in the static config (already told hardware 750 * we want auto during upload phase). 
751 */ 752 mac[port].speed = speed; 753 754 /* Write to the dynamic reconfiguration tables */ 755 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 756 &mac[port], true); 757 if (rc < 0) { 758 dev_err(dev, "Failed to write MAC config: %d\n", rc); 759 return rc; 760 } 761 762 /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at 763 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and 764 * RMII no change of the clock setup is required. Actually, changing 765 * the clock setup does interrupt the clock signal for a certain time 766 * which causes trouble for all PHYs relying on this signal. 767 */ 768 phy_mode = mii->xmii_mode[port]; 769 if (phy_mode != XMII_MODE_RGMII) 770 return 0; 771 772 return sja1105_clocking_setup_port(priv, port); 773 } 774 775 /* The SJA1105 MAC programming model is through the static config (the xMII 776 * Mode table cannot be dynamically reconfigured), and we have to program 777 * that early (earlier than PHYLINK calls us, anyway). 778 * So just error out in case the connected PHY attempts to change the initial 779 * system interface MII protocol from what is defined in the DT, at least for 780 * now. 
781 */ 782 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, 783 phy_interface_t interface) 784 { 785 struct sja1105_xmii_params_entry *mii; 786 sja1105_phy_interface_t phy_mode; 787 788 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 789 phy_mode = mii->xmii_mode[port]; 790 791 switch (interface) { 792 case PHY_INTERFACE_MODE_MII: 793 return (phy_mode != XMII_MODE_MII); 794 case PHY_INTERFACE_MODE_RMII: 795 return (phy_mode != XMII_MODE_RMII); 796 case PHY_INTERFACE_MODE_RGMII: 797 case PHY_INTERFACE_MODE_RGMII_ID: 798 case PHY_INTERFACE_MODE_RGMII_RXID: 799 case PHY_INTERFACE_MODE_RGMII_TXID: 800 return (phy_mode != XMII_MODE_RGMII); 801 default: 802 return true; 803 } 804 } 805 806 static void sja1105_mac_config(struct dsa_switch *ds, int port, 807 unsigned int link_an_mode, 808 const struct phylink_link_state *state) 809 { 810 struct sja1105_private *priv = ds->priv; 811 812 if (sja1105_phy_mode_mismatch(priv, port, state->interface)) 813 return; 814 815 if (link_an_mode == MLO_AN_INBAND) { 816 dev_err(ds->dev, "In-band AN not supported!\n"); 817 return; 818 } 819 820 sja1105_adjust_port_config(priv, port, state->speed); 821 } 822 823 static void sja1105_mac_link_down(struct dsa_switch *ds, int port, 824 unsigned int mode, 825 phy_interface_t interface) 826 { 827 sja1105_inhibit_tx(ds->priv, BIT(port), true); 828 } 829 830 static void sja1105_mac_link_up(struct dsa_switch *ds, int port, 831 unsigned int mode, 832 phy_interface_t interface, 833 struct phy_device *phydev) 834 { 835 sja1105_inhibit_tx(ds->priv, BIT(port), false); 836 } 837 838 static void sja1105_phylink_validate(struct dsa_switch *ds, int port, 839 unsigned long *supported, 840 struct phylink_link_state *state) 841 { 842 /* Construct a new mask which exhaustively contains all link features 843 * supported by the MAC, and then apply that (logical AND) to what will 844 * be sent to the PHY for "marketing". 
845 */ 846 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 847 struct sja1105_private *priv = ds->priv; 848 struct sja1105_xmii_params_entry *mii; 849 850 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 851 852 /* include/linux/phylink.h says: 853 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink 854 * expects the MAC driver to return all supported link modes. 855 */ 856 if (state->interface != PHY_INTERFACE_MODE_NA && 857 sja1105_phy_mode_mismatch(priv, port, state->interface)) { 858 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 859 return; 860 } 861 862 /* The MAC does not support pause frames, and also doesn't 863 * support half-duplex traffic modes. 864 */ 865 phylink_set(mask, Autoneg); 866 phylink_set(mask, MII); 867 phylink_set(mask, 10baseT_Full); 868 phylink_set(mask, 100baseT_Full); 869 if (mii->xmii_mode[port] == XMII_MODE_RGMII) 870 phylink_set(mask, 1000baseT_Full); 871 872 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 873 bitmap_and(state->advertising, state->advertising, mask, 874 __ETHTOOL_LINK_MODE_MASK_NBITS); 875 } 876 877 static int 878 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port, 879 const struct sja1105_l2_lookup_entry *requested) 880 { 881 struct sja1105_l2_lookup_entry *l2_lookup; 882 struct sja1105_table *table; 883 int i; 884 885 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 886 l2_lookup = table->entries; 887 888 for (i = 0; i < table->entry_count; i++) 889 if (l2_lookup[i].macaddr == requested->macaddr && 890 l2_lookup[i].vlanid == requested->vlanid && 891 l2_lookup[i].destports & BIT(port)) 892 return i; 893 894 return -1; 895 } 896 897 /* We want FDB entries added statically through the bridge command to persist 898 * across switch resets, which are a common thing during normal SJA1105 899 * operation. So we have to back them up in the static configuration tables 900 * and hence apply them on next static config upload... yay! 
901 */ 902 static int 903 sja1105_static_fdb_change(struct sja1105_private *priv, int port, 904 const struct sja1105_l2_lookup_entry *requested, 905 bool keep) 906 { 907 struct sja1105_l2_lookup_entry *l2_lookup; 908 struct sja1105_table *table; 909 int rc, match; 910 911 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; 912 913 match = sja1105_find_static_fdb_entry(priv, port, requested); 914 if (match < 0) { 915 /* Can't delete a missing entry. */ 916 if (!keep) 917 return 0; 918 919 /* No match => new entry */ 920 rc = sja1105_table_resize(table, table->entry_count + 1); 921 if (rc) 922 return rc; 923 924 match = table->entry_count - 1; 925 } 926 927 /* Assign pointer after the resize (it may be new memory) */ 928 l2_lookup = table->entries; 929 930 /* We have a match. 931 * If the job was to add this FDB entry, it's already done (mostly 932 * anyway, since the port forwarding mask may have changed, case in 933 * which we update it). 934 * Otherwise we have to delete it. 935 */ 936 if (keep) { 937 l2_lookup[match] = *requested; 938 return 0; 939 } 940 941 /* To remove, the strategy is to overwrite the element with 942 * the last one, and then reduce the array size by 1 943 */ 944 l2_lookup[match] = l2_lookup[table->entry_count - 1]; 945 return sja1105_table_resize(table, table->entry_count - 1); 946 } 947 948 /* First-generation switches have a 4-way set associative TCAM that 949 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of 950 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin). 951 * For the placement of a newly learnt FDB entry, the switch selects the bin 952 * based on a hash function, and the way within that bin incrementally. 
953 */ 954 static inline int sja1105et_fdb_index(int bin, int way) 955 { 956 return bin * SJA1105ET_FDB_BIN_SIZE + way; 957 } 958 959 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, 960 const u8 *addr, u16 vid, 961 struct sja1105_l2_lookup_entry *match, 962 int *last_unused) 963 { 964 int way; 965 966 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { 967 struct sja1105_l2_lookup_entry l2_lookup = {0}; 968 int index = sja1105et_fdb_index(bin, way); 969 970 /* Skip unused entries, optionally marking them 971 * into the return value 972 */ 973 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 974 index, &l2_lookup)) { 975 if (last_unused) 976 *last_unused = way; 977 continue; 978 } 979 980 if (l2_lookup.macaddr == ether_addr_to_u64(addr) && 981 l2_lookup.vlanid == vid) { 982 if (match) 983 *match = l2_lookup; 984 return way; 985 } 986 } 987 /* Return an invalid entry index if not found */ 988 return -1; 989 } 990 991 int sja1105et_fdb_add(struct dsa_switch *ds, int port, 992 const unsigned char *addr, u16 vid) 993 { 994 struct sja1105_l2_lookup_entry l2_lookup = {0}; 995 struct sja1105_private *priv = ds->priv; 996 struct device *dev = ds->dev; 997 int last_unused = -1; 998 int bin, way, rc; 999 1000 bin = sja1105et_fdb_hash(priv, addr, vid); 1001 1002 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1003 &l2_lookup, &last_unused); 1004 if (way >= 0) { 1005 /* We have an FDB entry. Is our port in the destination 1006 * mask? If yes, we need to do nothing. If not, we need 1007 * to rewrite the entry by adding this port to it. 1008 */ 1009 if (l2_lookup.destports & BIT(port)) 1010 return 0; 1011 l2_lookup.destports |= BIT(port); 1012 } else { 1013 int index = sja1105et_fdb_index(bin, way); 1014 1015 /* We don't have an FDB entry. We construct a new one and 1016 * try to find a place for it within the FDB table. 
1017 */ 1018 l2_lookup.macaddr = ether_addr_to_u64(addr); 1019 l2_lookup.destports = BIT(port); 1020 l2_lookup.vlanid = vid; 1021 1022 if (last_unused >= 0) { 1023 way = last_unused; 1024 } else { 1025 /* Bin is full, need to evict somebody. 1026 * Choose victim at random. If you get these messages 1027 * often, you may need to consider changing the 1028 * distribution function: 1029 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly 1030 */ 1031 get_random_bytes(&way, sizeof(u8)); 1032 way %= SJA1105ET_FDB_BIN_SIZE; 1033 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", 1034 bin, addr, way); 1035 /* Evict entry */ 1036 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1037 index, NULL, false); 1038 } 1039 } 1040 l2_lookup.index = sja1105et_fdb_index(bin, way); 1041 1042 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1043 l2_lookup.index, &l2_lookup, 1044 true); 1045 if (rc < 0) 1046 return rc; 1047 1048 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1049 } 1050 1051 int sja1105et_fdb_del(struct dsa_switch *ds, int port, 1052 const unsigned char *addr, u16 vid) 1053 { 1054 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1055 struct sja1105_private *priv = ds->priv; 1056 int index, bin, way, rc; 1057 bool keep; 1058 1059 bin = sja1105et_fdb_hash(priv, addr, vid); 1060 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid, 1061 &l2_lookup, NULL); 1062 if (way < 0) 1063 return 0; 1064 index = sja1105et_fdb_index(bin, way); 1065 1066 /* We have an FDB entry. Is our port in the destination mask? If yes, 1067 * we need to remove it. If the resulting port mask becomes empty, we 1068 * need to completely evict the FDB entry. 1069 * Otherwise we just write it back. 
1070 */ 1071 l2_lookup.destports &= ~BIT(port); 1072 1073 if (l2_lookup.destports) 1074 keep = true; 1075 else 1076 keep = false; 1077 1078 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1079 index, &l2_lookup, keep); 1080 if (rc < 0) 1081 return rc; 1082 1083 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1084 } 1085 1086 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, 1087 const unsigned char *addr, u16 vid) 1088 { 1089 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1090 struct sja1105_private *priv = ds->priv; 1091 int rc, i; 1092 1093 /* Search for an existing entry in the FDB table */ 1094 l2_lookup.macaddr = ether_addr_to_u64(addr); 1095 l2_lookup.vlanid = vid; 1096 l2_lookup.iotag = SJA1105_S_TAG; 1097 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1098 if (dsa_port_is_vlan_filtering(&ds->ports[port])) { 1099 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1100 l2_lookup.mask_iotag = BIT(0); 1101 } else { 1102 l2_lookup.mask_vlanid = 0; 1103 l2_lookup.mask_iotag = 0; 1104 } 1105 l2_lookup.destports = BIT(port); 1106 1107 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1108 SJA1105_SEARCH, &l2_lookup); 1109 if (rc == 0) { 1110 /* Found and this port is already in the entry's 1111 * port mask => job done 1112 */ 1113 if (l2_lookup.destports & BIT(port)) 1114 return 0; 1115 /* l2_lookup.index is populated by the switch in case it 1116 * found something. 1117 */ 1118 l2_lookup.destports |= BIT(port); 1119 goto skip_finding_an_index; 1120 } 1121 1122 /* Not found, so try to find an unused spot in the FDB. 1123 * This is slightly inefficient because the strategy is knock-knock at 1124 * every possible position from 0 to 1023. 
1125 */ 1126 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1127 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1128 i, NULL); 1129 if (rc < 0) 1130 break; 1131 } 1132 if (i == SJA1105_MAX_L2_LOOKUP_COUNT) { 1133 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); 1134 return -EINVAL; 1135 } 1136 l2_lookup.lockeds = true; 1137 l2_lookup.index = i; 1138 1139 skip_finding_an_index: 1140 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1141 l2_lookup.index, &l2_lookup, 1142 true); 1143 if (rc < 0) 1144 return rc; 1145 1146 return sja1105_static_fdb_change(priv, port, &l2_lookup, true); 1147 } 1148 1149 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, 1150 const unsigned char *addr, u16 vid) 1151 { 1152 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1153 struct sja1105_private *priv = ds->priv; 1154 bool keep; 1155 int rc; 1156 1157 l2_lookup.macaddr = ether_addr_to_u64(addr); 1158 l2_lookup.vlanid = vid; 1159 l2_lookup.iotag = SJA1105_S_TAG; 1160 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); 1161 if (dsa_port_is_vlan_filtering(&ds->ports[port])) { 1162 l2_lookup.mask_vlanid = VLAN_VID_MASK; 1163 l2_lookup.mask_iotag = BIT(0); 1164 } else { 1165 l2_lookup.mask_vlanid = 0; 1166 l2_lookup.mask_iotag = 0; 1167 } 1168 l2_lookup.destports = BIT(port); 1169 1170 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1171 SJA1105_SEARCH, &l2_lookup); 1172 if (rc < 0) 1173 return 0; 1174 1175 l2_lookup.destports &= ~BIT(port); 1176 1177 /* Decide whether we remove just this port from the FDB entry, 1178 * or if we remove it completely. 
1179 */ 1180 if (l2_lookup.destports) 1181 keep = true; 1182 else 1183 keep = false; 1184 1185 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, 1186 l2_lookup.index, &l2_lookup, keep); 1187 if (rc < 0) 1188 return rc; 1189 1190 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep); 1191 } 1192 1193 static int sja1105_fdb_add(struct dsa_switch *ds, int port, 1194 const unsigned char *addr, u16 vid) 1195 { 1196 struct sja1105_private *priv = ds->priv; 1197 1198 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, 1199 * so the switch still does some VLAN processing internally. 1200 * But Shared VLAN Learning (SVL) is also active, and it will take 1201 * care of autonomous forwarding between the unique pvid's of each 1202 * port. Here we just make sure that users can't add duplicate FDB 1203 * entries when in this mode - the actual VID doesn't matter except 1204 * for what gets printed in 'bridge fdb show'. In the case of zero, 1205 * no VID gets printed at all. 
1206 */ 1207 if (!dsa_port_is_vlan_filtering(&ds->ports[port])) 1208 vid = 0; 1209 1210 return priv->info->fdb_add_cmd(ds, port, addr, vid); 1211 } 1212 1213 static int sja1105_fdb_del(struct dsa_switch *ds, int port, 1214 const unsigned char *addr, u16 vid) 1215 { 1216 struct sja1105_private *priv = ds->priv; 1217 1218 if (!dsa_port_is_vlan_filtering(&ds->ports[port])) 1219 vid = 0; 1220 1221 return priv->info->fdb_del_cmd(ds, port, addr, vid); 1222 } 1223 1224 static int sja1105_fdb_dump(struct dsa_switch *ds, int port, 1225 dsa_fdb_dump_cb_t *cb, void *data) 1226 { 1227 struct sja1105_private *priv = ds->priv; 1228 struct device *dev = ds->dev; 1229 int i; 1230 1231 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1232 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1233 u8 macaddr[ETH_ALEN]; 1234 int rc; 1235 1236 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, 1237 i, &l2_lookup); 1238 /* No fdb entry at i, not an issue */ 1239 if (rc == -ENOENT) 1240 continue; 1241 if (rc) { 1242 dev_err(dev, "Failed to dump FDB: %d\n", rc); 1243 return rc; 1244 } 1245 1246 /* FDB dump callback is per port. This means we have to 1247 * disregard a valid entry if it's not for this port, even if 1248 * only to revisit it later. This is inefficient because the 1249 * 1024-sized FDB table needs to be traversed 4 times through 1250 * SPI during a 'bridge fdb show' command. 1251 */ 1252 if (!(l2_lookup.destports & BIT(port))) 1253 continue; 1254 u64_to_ether_addr(l2_lookup.macaddr, macaddr); 1255 1256 /* We need to hide the dsa_8021q VLANs from the user. 
*/ 1257 if (!dsa_port_is_vlan_filtering(&ds->ports[port])) 1258 l2_lookup.vlanid = 0; 1259 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); 1260 } 1261 return 0; 1262 } 1263 1264 /* This callback needs to be present */ 1265 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port, 1266 const struct switchdev_obj_port_mdb *mdb) 1267 { 1268 return 0; 1269 } 1270 1271 static void sja1105_mdb_add(struct dsa_switch *ds, int port, 1272 const struct switchdev_obj_port_mdb *mdb) 1273 { 1274 sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); 1275 } 1276 1277 static int sja1105_mdb_del(struct dsa_switch *ds, int port, 1278 const struct switchdev_obj_port_mdb *mdb) 1279 { 1280 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); 1281 } 1282 1283 static int sja1105_bridge_member(struct dsa_switch *ds, int port, 1284 struct net_device *br, bool member) 1285 { 1286 struct sja1105_l2_forwarding_entry *l2_fwd; 1287 struct sja1105_private *priv = ds->priv; 1288 int i, rc; 1289 1290 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; 1291 1292 for (i = 0; i < SJA1105_NUM_PORTS; i++) { 1293 /* Add this port to the forwarding matrix of the 1294 * other ports in the same bridge, and viceversa. 1295 */ 1296 if (!dsa_is_user_port(ds, i)) 1297 continue; 1298 /* For the ports already under the bridge, only one thing needs 1299 * to be done, and that is to add this port to their 1300 * reachability domain. So we can perform the SPI write for 1301 * them immediately. However, for this port itself (the one 1302 * that is new to the bridge), we need to add all other ports 1303 * to its reachability domain. So we do that incrementally in 1304 * this loop, and perform the SPI write only at the end, once 1305 * the domain contains all other bridge ports. 
1306 */ 1307 if (i == port) 1308 continue; 1309 if (dsa_to_port(ds, i)->bridge_dev != br) 1310 continue; 1311 sja1105_port_allow_traffic(l2_fwd, i, port, member); 1312 sja1105_port_allow_traffic(l2_fwd, port, i, member); 1313 1314 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1315 i, &l2_fwd[i], true); 1316 if (rc < 0) 1317 return rc; 1318 } 1319 1320 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, 1321 port, &l2_fwd[port], true); 1322 } 1323 1324 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, 1325 u8 state) 1326 { 1327 struct sja1105_private *priv = ds->priv; 1328 struct sja1105_mac_config_entry *mac; 1329 1330 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1331 1332 switch (state) { 1333 case BR_STATE_DISABLED: 1334 case BR_STATE_BLOCKING: 1335 /* From UM10944 description of DRPDTAG (why put this there?): 1336 * "Management traffic flows to the port regardless of the state 1337 * of the INGRESS flag". So BPDUs are still be allowed to pass. 1338 * At the moment no difference between DISABLED and BLOCKING. 
1339 */ 1340 mac[port].ingress = false; 1341 mac[port].egress = false; 1342 mac[port].dyn_learn = false; 1343 break; 1344 case BR_STATE_LISTENING: 1345 mac[port].ingress = true; 1346 mac[port].egress = false; 1347 mac[port].dyn_learn = false; 1348 break; 1349 case BR_STATE_LEARNING: 1350 mac[port].ingress = true; 1351 mac[port].egress = false; 1352 mac[port].dyn_learn = true; 1353 break; 1354 case BR_STATE_FORWARDING: 1355 mac[port].ingress = true; 1356 mac[port].egress = true; 1357 mac[port].dyn_learn = true; 1358 break; 1359 default: 1360 dev_err(ds->dev, "invalid STP state: %d\n", state); 1361 return; 1362 } 1363 1364 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, 1365 &mac[port], true); 1366 } 1367 1368 static int sja1105_bridge_join(struct dsa_switch *ds, int port, 1369 struct net_device *br) 1370 { 1371 return sja1105_bridge_member(ds, port, br, true); 1372 } 1373 1374 static void sja1105_bridge_leave(struct dsa_switch *ds, int port, 1375 struct net_device *br) 1376 { 1377 sja1105_bridge_member(ds, port, br, false); 1378 } 1379 1380 /* For situations where we need to change a setting at runtime that is only 1381 * available through the static configuration, resetting the switch in order 1382 * to upload the new static config is unavoidable. Back up the settings we 1383 * modify at runtime (currently only MAC) and restore them after uploading, 1384 * such that this operation is relatively seamless. 1385 */ 1386 int sja1105_static_config_reload(struct sja1105_private *priv) 1387 { 1388 struct sja1105_mac_config_entry *mac; 1389 int speed_mbps[SJA1105_NUM_PORTS]; 1390 int rc, i; 1391 1392 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 1393 1394 /* Back up the dynamic link speed changed by sja1105_adjust_port_config 1395 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the 1396 * switch wants to see in the static config in order to allow us to 1397 * change it through the dynamic interface later. 
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the per-port link speeds backed up above */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}

/* Set @pvid as the VLAN ID assigned to untagged traffic received on @port,
 * by rewriting that port's MAC Configuration table entry.
 */
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}

/* Return the index within the VLAN Lookup table of @vid, or -1 if the VLAN
 * is not currently configured.
 */
static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
	struct sja1105_vlan_lookup_entry *vlan;
	int count, i;

	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

	for (i = 0; i < count; i++)
		if (vlan[i].vlanid == vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}

/* Add (@enabled=true) or remove (@enabled=false) @port as a member of @vid,
 * keeping the shadow static config and the hardware VLAN Lookup table in
 * sync. @untagged selects whether frames egress @port without the VLAN tag.
 */
static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}

/* Enable or disable dsa_8021q tagging on all ports of this switch. */
static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	int rc, i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
				i, rc);
			return rc;
		}
	}
	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ? "Enabled" : "Disabled");
	return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
	return DSA_TAG_PROTO_SJA1105;
}

/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}

/* The TPID setting belongs to the General Parameters table,
 * which can only be partially reconfigured at runtime (and not the TPID).
 * So a switch reset is required.
 */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	u16 tpid, tpid2;
	int rc;

	if (enabled) {
		/* Enable VLAN filtering. */
		tpid  = ETH_P_8021AD;
		tpid2 = ETH_P_8021Q;
	} else {
		/* Disable VLAN filtering. */
		tpid  = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
	 *
	 * This is extremely convenient for us, because in modes with
	 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
	 * each front panel port. This is good for identification but breaks
	 * learning badly - the VID of the learnt FDB entry is unique, aka
	 * no frames coming from any other port are going to have it. So
	 * for forwarding purposes, this is as though learning was broken
	 * (all frames get flooded).
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = !enabled;

	rc = sja1105_static_config_reload(priv);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Switch port identification based on 802.1Q is only passable
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive.
	 */
	return sja1105_setup_8021q_tagging(ds, !enabled);
}

static void sja1105_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
				vid, port, rc);
			return;
		}
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			rc = sja1105_pvid_apply(ds->priv, port, vid);
			if (rc < 0) {
				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
					vid, port, rc);
				return;
			}
		}
	}
}

static int sja1105_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
				vid, port, rc);
			return rc;
		}
	}
	return 0;
}

/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	rc = sja1105_ptp_clock_register(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
		return rc;
	}
	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SJA1105_NUM_TC;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}

/* Undo sja1105_setup: stop TAS, drain deferred RX timestamp work, and free
 * the PTP clock and the shadow static config.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_tas_teardown(ds);
	cancel_work_sync(&priv->tagger_data.rxtstamp_work);
	skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
	sja1105_ptp_clock_unregister(priv);
	sja1105_static_config_free(&priv->static_config);
}

static int sja1105_port_enable(struct dsa_switch *ds, int port,
			       struct phy_device *phy)
{
	struct net_device *slave;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* The switch cannot filter on C-tags per port, so don't advertise
	 * the feature on the slave net devices.
	 */
	slave = ds->ports[port].slave;

	slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	return 0;
}

/* Install a one-shot management route for @skb's DMAC in @slot, enqueue the
 * frame on the CPU port, then poll until the switch consumes the route.
 * Consumes @skb. Always returns NETDEV_TX_OK (errors are logged).
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, ds->ports[port].slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
					      struct sk_buff *skb)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];
	struct skb_shared_hwtstamps shwt = {0};
	int slot = sp->mgmt_slot;
	struct sk_buff *clone;
	u64 now, ts;
	int rc;

	/* The tragic fact about the switch having 4x2 slots for installing
	 * management routes is that all of them except one are actually
	 * useless.
	 * If 2 slots are simultaneously configured for two BPDUs sent to the
	 * same (multicast) DMAC but on different egress ports, the switch
	 * would confuse them and redirect first frame it receives on the CPU
	 * port towards the port configured on the numerically first slot
	 * (therefore wrong port), then second received frame on second slot
	 * (also wrong port).
	 * So for all practical purposes, there needs to be a lock that
	 * prevents that from happening. The slot used here is utterly useless
	 * (could have simply been 0 just as fine), but we are doing it
	 * nonetheless, in case a smarter idea ever comes up in the future.
	 */
	mutex_lock(&priv->mgmt_lock);

	/* The clone, if there, was made by dsa_skb_tx_timestamp */
	clone = DSA_SKB_CB(skb)->clone;

	sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);

	if (!clone)
		goto out;

	/* TX timestamping was requested: reconstruct the full egress
	 * timestamp and complete it on the clone.
	 */
	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	mutex_lock(&priv->ptp_lock);

	now = priv->tstamp_cc.read(&priv->tstamp_cc);

	rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
	if (rc < 0) {
		dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
		kfree_skb(clone);
		goto out_unlock_ptp;
	}

	ts = sja1105_tstamp_reconstruct(priv, now, ts);
	ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

	shwt.hwtstamp = ns_to_ktime(ts);
	skb_complete_tx_timestamp(clone, &shwt);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_lock);
out:
	mutex_unlock(&priv->mgmt_lock);
	return NETDEV_TX_OK;
}

/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
				   unsigned int ageing_time)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	unsigned int maxage;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;

	maxage = SJA1105_AGEING_TIME_MS(ageing_time);

	/* Avoid an expensive switch reset if nothing changed */
	if (l2_lookup_params->maxage == maxage)
		return 0;

	l2_lookup_params->maxage = maxage;

	return sja1105_static_config_reload(priv);
}

/* Must be called only with priv->tagger_data.state bit
 * SJA1105_HWTS_RX_EN cleared
 */
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
				      bool on)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_table *table;
	int rc;

	/* Meta frames carry the partial RX timestamps, so their generation
	 * must track the requested timestamping state.
	 */
	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	general_params->send_meta1 = on;
	general_params->send_meta0 = on;

	rc = sja1105_init_avb_params(priv, on);
	if (rc < 0)
		return rc;

	/* Initialize the meta state machine to a known state */
	if (priv->tagger_data.stampable_skb) {
		kfree_skb(priv->tagger_data.stampable_skb);
		priv->tagger_data.stampable_skb = NULL;
	}

	return sja1105_static_config_reload(priv);
}

/* SIOCSHWTSTAMP ioctl handler: configure TX and RX hardware timestamping.
 * RX timestamping is global to the switch (a static config reload), while
 * TX timestamping is tracked per port.
 */
static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
				struct ifreq *ifr)
{
	struct sja1105_private *priv = ds->priv;
	struct hwtstamp_config config;
	bool rx_on;
	int rc;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->ports[port].hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->ports[port].hwts_tx_en = true;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_on = false;
		break;
	default:
		rx_on = true;
		break;
	}

	if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
		/* Clear the bit first so the tagger stops consuming meta
		 * frames while the config reload is in flight.
		 */
		clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);

		rc = sja1105_change_rxtstamping(priv, rx_on);
		if (rc < 0) {
			dev_err(ds->dev,
				"Failed to change RX timestamping: %d\n", rc);
			return rc;
		}
		if (rx_on)
			set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
	}

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;
	return 0;
}

/* SIOCGHWTSTAMP ioctl handler: report the current timestamping config. */
static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
				struct ifreq *ifr)
{
	struct sja1105_private *priv = ds->priv;
	struct hwtstamp_config config;

	config.flags = 0;
	if (priv->ports[port].hwts_tx_en)
		config.tx_type = HWTSTAMP_TX_ON;
	else
		config.tx_type = HWTSTAMP_TX_OFF;
	if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else
		config.rx_filter = HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

#define to_tagger(d) \
	container_of((d), struct sja1105_tagger_data, rxtstamp_work)
#define to_sja1105(d) \
	container_of((d), struct sja1105_private, tagger_data)

/* Worker that drains skb_rxtstamp_queue, reconstructing the full RX
 * timestamp for each skb (requires a sleepable context for the SPI PTP
 * clock read) before passing it up the stack.
 */
static void sja1105_rxtstamp_work(struct work_struct *work)
{
	struct sja1105_tagger_data *data = to_tagger(work);
	struct sja1105_private *priv = to_sja1105(data);
	struct sk_buff *skb;
	u64 now;

	mutex_lock(&priv->ptp_lock);

	while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
		struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
		u64 ts;

		now = priv->tstamp_cc.read(&priv->tstamp_cc);

		*shwt = (struct skb_shared_hwtstamps) {0};

		ts = SJA1105_SKB_CB(skb)->meta_tstamp;
		ts = sja1105_tstamp_reconstruct(priv, now, ts);
		ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

		shwt->hwtstamp = ns_to_ktime(ts);
		netif_rx_ni(skb);
	}

	mutex_unlock(&priv->ptp_lock);
}

/* Called from dsa_skb_defer_rx_timestamp */
static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
				  struct sk_buff *skb, unsigned int type)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tagger_data *data = &priv->tagger_data;

	if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
		return false;

	/* We need to read the full PTP clock to reconstruct the Rx
	 * timestamp. For that we need a sleepable context.
	 */
	skb_queue_tail(&data->skb_rxtstamp_queue, skb);
	schedule_work(&data->rxtstamp_work);
	return true;
}

/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
 * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
 * callback, where we will timestamp it synchronously.
2051 */ 2052 static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, 2053 struct sk_buff *skb, unsigned int type) 2054 { 2055 struct sja1105_private *priv = ds->priv; 2056 struct sja1105_port *sp = &priv->ports[port]; 2057 2058 if (!sp->hwts_tx_en) 2059 return false; 2060 2061 return true; 2062 } 2063 2064 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, 2065 enum tc_setup_type type, 2066 void *type_data) 2067 { 2068 switch (type) { 2069 case TC_SETUP_QDISC_TAPRIO: 2070 return sja1105_setup_tc_taprio(ds, port, type_data); 2071 default: 2072 return -EOPNOTSUPP; 2073 } 2074 } 2075 2076 static const struct dsa_switch_ops sja1105_switch_ops = { 2077 .get_tag_protocol = sja1105_get_tag_protocol, 2078 .setup = sja1105_setup, 2079 .teardown = sja1105_teardown, 2080 .set_ageing_time = sja1105_set_ageing_time, 2081 .phylink_validate = sja1105_phylink_validate, 2082 .phylink_mac_config = sja1105_mac_config, 2083 .phylink_mac_link_up = sja1105_mac_link_up, 2084 .phylink_mac_link_down = sja1105_mac_link_down, 2085 .get_strings = sja1105_get_strings, 2086 .get_ethtool_stats = sja1105_get_ethtool_stats, 2087 .get_sset_count = sja1105_get_sset_count, 2088 .get_ts_info = sja1105_get_ts_info, 2089 .port_enable = sja1105_port_enable, 2090 .port_fdb_dump = sja1105_fdb_dump, 2091 .port_fdb_add = sja1105_fdb_add, 2092 .port_fdb_del = sja1105_fdb_del, 2093 .port_bridge_join = sja1105_bridge_join, 2094 .port_bridge_leave = sja1105_bridge_leave, 2095 .port_stp_state_set = sja1105_bridge_stp_state_set, 2096 .port_vlan_prepare = sja1105_vlan_prepare, 2097 .port_vlan_filtering = sja1105_vlan_filtering, 2098 .port_vlan_add = sja1105_vlan_add, 2099 .port_vlan_del = sja1105_vlan_del, 2100 .port_mdb_prepare = sja1105_mdb_prepare, 2101 .port_mdb_add = sja1105_mdb_add, 2102 .port_mdb_del = sja1105_mdb_del, 2103 .port_deferred_xmit = sja1105_port_deferred_xmit, 2104 .port_hwtstamp_get = sja1105_hwtstamp_get, 2105 .port_hwtstamp_set = sja1105_hwtstamp_set, 2106 
.port_rxtstamp = sja1105_port_rxtstamp, 2107 .port_txtstamp = sja1105_port_txtstamp, 2108 .port_setup_tc = sja1105_port_setup_tc, 2109 }; 2110 2111 static int sja1105_check_device_id(struct sja1105_private *priv) 2112 { 2113 const struct sja1105_regs *regs = priv->info->regs; 2114 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; 2115 struct device *dev = &priv->spidev->dev; 2116 u64 device_id; 2117 u64 part_no; 2118 int rc; 2119 2120 rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id, 2121 &device_id, SJA1105_SIZE_DEVICE_ID); 2122 if (rc < 0) 2123 return rc; 2124 2125 if (device_id != priv->info->device_id) { 2126 dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n", 2127 priv->info->device_id, device_id); 2128 return -ENODEV; 2129 } 2130 2131 rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id, 2132 prod_id, SJA1105_SIZE_DEVICE_ID); 2133 if (rc < 0) 2134 return rc; 2135 2136 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); 2137 2138 if (part_no != priv->info->part_no) { 2139 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", 2140 priv->info->part_no, part_no); 2141 return -ENODEV; 2142 } 2143 2144 return 0; 2145 } 2146 2147 static int sja1105_probe(struct spi_device *spi) 2148 { 2149 struct sja1105_tagger_data *tagger_data; 2150 struct device *dev = &spi->dev; 2151 struct sja1105_private *priv; 2152 struct dsa_switch *ds; 2153 int rc, i; 2154 2155 if (!dev->of_node) { 2156 dev_err(dev, "No DTS bindings for SJA1105 driver\n"); 2157 return -EINVAL; 2158 } 2159 2160 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); 2161 if (!priv) 2162 return -ENOMEM; 2163 2164 /* Configure the optional reset pin and bring up switch */ 2165 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 2166 if (IS_ERR(priv->reset_gpio)) 2167 dev_dbg(dev, "reset-gpios not defined, ignoring\n"); 2168 else 2169 sja1105_hw_reset(priv->reset_gpio, 1, 1); 2170 2171 /* Populate our driver private structure (priv) based on 
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	/* Per-variant constants selected by the matched compatible string
	 * (see sja1105_dt_ids below).
	 */
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	/* Initialize the Rx timestamping machinery (queue, work item, meta
	 * lock) before registering with the DSA core below.
	 */
	tagger_data = &priv->tagger_data;
	skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
	INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
	spin_lock_init(&tagger_data->meta_lock);

	/* Connections between dsa_port and sja1105_port */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		struct sja1105_port *sp = &priv->ports[i];

		ds->ports[i].priv = sp;
		sp->dp = &ds->ports[i];
		sp->data = tagger_data;
	}
	mutex_init(&priv->mgmt_lock);

	/* Time-Aware Shaper (tc-taprio offload) state */
	sja1105_tas_setup(ds);

	return dsa_register_switch(priv->ds);
}

/* Undo sja1105_probe: unregister from the DSA core.  All allocations are
 * devm-managed, so there is nothing else to free here.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}

/* One entry per supported switch variant, each carrying that variant's
 * constants (device ID, part number, register layout) as match data.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
2239 { /* sentinel */ }, 2240 }; 2241 MODULE_DEVICE_TABLE(of, sja1105_dt_ids); 2242 2243 static struct spi_driver sja1105_driver = { 2244 .driver = { 2245 .name = "sja1105", 2246 .owner = THIS_MODULE, 2247 .of_match_table = of_match_ptr(sja1105_dt_ids), 2248 }, 2249 .probe = sja1105_probe, 2250 .remove = sja1105_remove, 2251 }; 2252 2253 module_spi_driver(sja1105_driver); 2254 2255 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>"); 2256 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>"); 2257 MODULE_DESCRIPTION("SJA1105 Driver"); 2258 MODULE_LICENSE("GPL v2"); 2259