// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"

static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}

static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
			   int from, int to, bool allow)
{
	if (allow) {
		l2_fwd[from].bc_domain |= BIT(to);
		l2_fwd[from].reach_port |= BIT(to);
		l2_fwd[from].fl_domain |= BIT(to);
	} else {
		l2_fwd[from].bc_domain &= ~BIT(to);
		l2_fwd[from].reach_port &= ~BIT(to);
		l2_fwd[from].fl_domain &= ~BIT(to);
	}
}

/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;
	sja1105_mii_role_t role;
};

static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with an EtherType other than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}

static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
{
	if (priv->info->part_no != SJA1105R_PART_NO &&
	    priv->info->part_no != SJA1105S_PART_NO)
		return false;

	if (port != SJA1105_SGMII_PORT)
		return false;

	if (dsa_is_unused_port(priv->ds, port))
		return false;

	return true;
}

static int sja1105_init_mii_settings(struct sja1105_private *priv,
				     struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];

	/* Discard previous xMII Mode Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on PHYLINK DT bindings */
	table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;

	mii = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_unused_port(priv->ds, i))
			continue;

		switch (ports[i].phy_mode) {
		case PHY_INTERFACE_MODE_MII:
			mii->xmii_mode[i] = XMII_MODE_MII;
			break;
		case PHY_INTERFACE_MODE_RMII:
			mii->xmii_mode[i] = XMII_MODE_RMII;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			mii->xmii_mode[i] = XMII_MODE_RGMII;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			if (!sja1105_supports_sgmii(priv, i))
				return -EINVAL;
			mii->xmii_mode[i] = XMII_MODE_SGMII;
			break;
		default:
			dev_err(dev, "Unsupported PHY mode %s!\n",
				phy_modes(ports[i].phy_mode));
		}

		/* Even though the SerDes port is able to drive SGMII autoneg
		 * like a PHY would, from the perspective of the XMII tables,
		 * the SGMII port should always be put in MAC mode.
		 */
		if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
			mii->phy_mac[i] = XMII_MAC;
		else
			mii->phy_mac[i] = ports[i].role;
	}
	return 0;
}

static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* We only populate the FDB table through dynamic
	 * L2 Address Lookup entries
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}
	return 0;
}

static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		.maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
			     max_fdb_entries, max_fdb_entries, },
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}

static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 1,
	};
	int i;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 1.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN 1: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit priority-tagged frames as untagged.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		pvid.vmemb_port |= BIT(i);
		pvid.vlan_bc |= BIT(i);
		pvid.tag_port &= ~BIT(i);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}

static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}

static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
		/* Disallow dynamic reconfiguration of vlan_pmap */
		.max_dynp = 0,
		/* Use a single memory partition for all ingress queues */
		.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
				default_l2fwd_params;

	return 0;
}

static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Default to an invalid value */
		.mirr_port = SJA1105_NUM_PORTS,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}

static int sja1105_init_avb_params(struct sja1105_private *priv)
{
	struct sja1105_avb_params_entry *avb;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];

	/* Discard previous AVB Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;

	avb = table->entries;

	/* Configure the MAC addresses for meta frames */
	avb->destmeta = SJA1105_META_DMAC;
	avb->srcmeta = SJA1105_META_SMAC;
	/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
	 * default. This is because there might be boards with a hardware
	 * layout where enabling the pin as output might cause an electrical
	 * clash. On E/T the pin is always an output, which the board designers
	 * probably already knew, so even if there are going to be electrical
	 * issues, there's nothing we can do.
	 */
	avb->cas_master = false;

	return 0;
}

/* The L2 policing table is 2-stage. The table is looked up for each frame
 * according to the ingress port, whether it was broadcast or not, and the
 * classified traffic class (given by VLAN PCP). This portion of the lookup is
 * fixed, and gives access to the SHARINDX, an indirection register pointing
 * within the policing table itself, which is used to resolve the policer that
 * will be used for this frame.
 *
 *  Stage 1                              Stage 2
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 2: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 5: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 *    ...                               | Policer 7: Rate, Burst, MTU     |
 * +------------+--------+              +---------------------------------+
 * |Port 4 TC 7 |SHARINDX|                 ...
 * +------------+--------+
 * |Port 0 BCAST|SHARINDX|                 ...
 * +------------+--------+
 * |Port 1 BCAST|SHARINDX|                 ...
 * +------------+--------+
 *    ...                                  ...
 * +------------+--------+              +---------------------------------+
 * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
 * +------------+--------+              +---------------------------------+
 *
 * In this driver, we shall use policers 0-4 as statically allocated port
 * (matchall) policers. So we need to make the SHARINDX for all lookups
 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
 * lookup) equal.
 * The remaining policers (40) shall be dynamically allocated for flower
 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
 */
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)

static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int port, tc;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* Setup shared indices for the matchall policers */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			policing[port * SJA1105_NUM_TC + tc].sharindx = port;

		policing[bcast].sharindx = port;
	}

	/* Setup the matchall policer parameters */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		if (dsa_is_cpu_port(priv->ds, port))
			mtu += VLAN_HLEN;

		policing[port].smax = 65535; /* Burst size in bytes */
		policing[port].rate = SJA1105_RATE_MBPS(1000);
		policing[port].maxlen = mtu;
		policing[port].partition = 0;
	}

	return 0;
}

static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_avb_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}

static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
				      const struct sja1105_dt_port *ports)
{
	int i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (ports[i].role == XMII_MAC)
			continue;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_rx_delay[i] = true;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_tx_delay[i] = true;

		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
		    !priv->info->setup_rgmii_delay)
			return -EINVAL;
	}
	return 0;
}

static int sja1105_parse_ports_node(struct sja1105_private *priv,
				    struct sja1105_dt_port *ports,
				    struct device_node *ports_node)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		struct device_node *phy_node;
		phy_interface_t phy_mode;
		u32 index;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &index) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				index);
			of_node_put(child);
			return -ENODEV;
		}
		ports[index].phy_mode = phy_mode;

		phy_node = of_parse_phandle(child, "phy-handle", 0);
		if (!phy_node) {
			if (!of_phy_is_fixed_link(child)) {
				dev_err(dev, "phy-handle or fixed-link "
					"properties missing!\n");
				of_node_put(child);
				return -ENODEV;
			}
			/* phy-handle is missing, but fixed-link isn't.
			 * So it's a fixed link. Default to PHY role.
			 */
			ports[index].role = XMII_PHY;
		} else {
			/* phy-handle present => put port in MAC role */
			ports[index].role = XMII_MAC;
			of_node_put(phy_node);
		}

		/* The MAC/PHY role can be overridden with explicit bindings */
		if (of_property_read_bool(child, "sja1105,role-mac"))
			ports[index].role = XMII_MAC;
		else if (of_property_read_bool(child, "sja1105,role-phy"))
			ports[index].role = XMII_PHY;
	}

	return 0;
}

static int sja1105_parse_dt(struct sja1105_private *priv,
			    struct sja1105_dt_port *ports)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *switch_node = dev->of_node;
	struct device_node *ports_node;
	int rc;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	rc = sja1105_parse_ports_node(priv, ports, ports_node);
	of_node_put(ports_node);

	return rc;
}

static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 val;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
			      NULL);
	if (rc < 0)
		return rc;

	return val;
}

static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
			       u16 pcs_val)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 val = pcs_val;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
			      NULL);
	if (rc < 0)
		return rc;

	return val;
}

static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
	 * stop the clock during LPI mode, make the MAC reconfigure
	 * autonomously after PCS autoneg is done, flush the internal FIFOs.
	 */
	sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
					       SJA1105_DC1_CLOCK_STOP_EN |
					       SJA1105_DC1_MAC_AUTO_SW |
					       SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
	sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: Use SGMII autoneg */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, SJA1105_AC, ac);
	/* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
	 * sja1105_sgmii_pcs_force_speed must be called later for the link
	 * to become operational.
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, MII_BMCR,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}

static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
					  int speed)
{
	int pcs_speed;

	switch (speed) {
	case SPEED_1000:
		pcs_speed = BMCR_SPEED1000;
		break;
	case SPEED_100:
		pcs_speed = BMCR_SPEED100;
		break;
	case SPEED_10:
		pcs_speed = BMCR_SPEED10;
		break;
	default:
		dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
		return;
	}
	sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
}

/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
	[SJA1105_SPEED_10MBPS] = SPEED_10,
	[SJA1105_SPEED_100MBPS] = SPEED_100,
	[SJA1105_SPEED_1000MBPS] = SPEED_1000,
};

/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}

/* The SJA1105 MAC programming model is through the static config (the xMII
 * Mode table cannot be dynamically reconfigured), and we have to program
 * that early (earlier than PHYLINK calls us, anyway).
 * So just error out in case the connected PHY attempts to change the initial
 * system interface MII protocol from what is defined in the DT, at least for
 * now.
 */
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
				      phy_interface_t interface)
{
	struct sja1105_xmii_params_entry *mii;
	sja1105_phy_interface_t phy_mode;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	phy_mode = mii->xmii_mode[port];

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
		return (phy_mode != XMII_MODE_MII);
	case PHY_INTERFACE_MODE_RMII:
		return (phy_mode != XMII_MODE_RMII);
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return (phy_mode != XMII_MODE_RGMII);
	case PHY_INTERFACE_MODE_SGMII:
		return (phy_mode != XMII_MODE_SGMII);
	default:
		return true;
	}
}

static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	bool is_sgmii = sja1105_supports_sgmii(priv, port);

	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
			phy_modes(state->interface));
		return;
	}

	if (phylink_autoneg_inband(mode) && !is_sgmii) {
		dev_err(ds->dev, "In-band AN not supported!\n");
		return;
	}

	if (is_sgmii)
		sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
					 false);
}

static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}

static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_adjust_port_config(priv, port, speed);

	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	sja1105_inhibit_tx(priv, BIT(port), false);
}

static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 * expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
				     struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	int ais;

	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
	ais = sja1105_sgmii_read(priv, SJA1105_AIS);
	if (ais < 0)
		return ais;

	switch (SJA1105_AIS_SPEED(ais)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	default:
		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
			SJA1105_AIS_SPEED(ais));
	}
	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
	state->an_complete = SJA1105_AIS_COMPLETE(ais);
	state->link = SJA1105_AIS_LINK_STATUS(ais);

	return 0;
}

static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
			      const struct sja1105_l2_lookup_entry *requested)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	for (i = 0; i < table->entry_count; i++)
		if (l2_lookup[i].macaddr == requested->macaddr &&
		    l2_lookup[i].vlanid == requested->vlanid &&
		    l2_lookup[i].destports & BIT(port))
			return i;

	return -1;
}

/* We want FDB entries added statically through the bridge command to persist
 * across switch resets, which are a common thing during normal SJA1105
 * operation. So we have to back them up in the static configuration tables
 * and hence apply them on next static config upload... yay!
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int rc, match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	match = sja1105_find_static_fdb_entry(priv, port, requested);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!keep)
			return 0;

		/* No match => new entry */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		match = table->entry_count - 1;
	}

	/* Assign pointer after the resize (it may be new memory) */
	l2_lookup = table->entries;

	/* We have a match.
	 * If the job was to add this FDB entry, it's already done (mostly
	 * anyway, since the port forwarding mask may have changed, case in
	 * which we update it).
	 * Otherwise we have to delete it.
	 */
	if (keep) {
		l2_lookup[match] = *requested;
		return 0;
	}

	/* To remove, the strategy is to overwrite the element with
	 * the last one, and then reduce the array size by 1
	 */
	l2_lookup[match] = l2_lookup[table->entry_count - 1];
	return sja1105_table_resize(table, table->entry_count - 1);
}

/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
static int sja1105et_fdb_index(int bin, int way)
{
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}

static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}

int sja1105et_fdb_add(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int last_unused = -1;
	int bin, way, rc;

	bin = sja1105et_fdb_hash(priv, addr, vid);

	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, &last_unused);
	if (way >= 0) {
		/* We have an FDB entry. Is our port in the destination
		 * mask? If yes, we need to do nothing. If not, we need
		 * to rewrite the entry by adding this port to it.
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		l2_lookup.destports |= BIT(port);
	} else {
		int index = sja1105et_fdb_index(bin, way);

		/* We don't have an FDB entry. We construct a new one and
		 * try to find a place for it within the FDB table.
		 */
		l2_lookup.macaddr = ether_addr_to_u64(addr);
		l2_lookup.destports = BIT(port);
		l2_lookup.vlanid = vid;

		if (last_unused >= 0) {
			way = last_unused;
		} else {
			/* Bin is full, need to evict somebody.
			 * Choose victim at random. If you get these messages
			 * often, you may need to consider changing the
			 * distribution function:
			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
			 */
			get_random_bytes(&way, sizeof(u8));
			way %= SJA1105ET_FDB_BIN_SIZE;
			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
				 bin, addr, way);
			/* Evict entry */
			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
						     index, NULL, false);
		}
	}
	l2_lookup.index = sja1105et_fdb_index(bin, way);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105et_fdb_del(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int index, bin, way, rc;
	bool keep;

	bin = sja1105et_fdb_hash(priv, addr, vid);
	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, NULL);
	if (way < 0)
		return 0;
	index = sja1105et_fdb_index(bin, way);

	/* We have an FDB entry. Is our port in the destination mask? If yes,
	 * we need to remove it. If the resulting port mask becomes empty, we
	 * need to completely evict the FDB entry.
	 * Otherwise we just write it back.
	 */
	l2_lookup.destports &= ~BIT(port);

	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.lockeds = true;
	l2_lookup.index = i;

skip_finding_an_index:
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* Decide whether we remove just this port from the FDB entry,
	 * or if we remove it completely.
	 */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}

static int sja1105_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;

	/* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
	 * so the switch still does some VLAN processing internally.
	 * But Shared VLAN Learning (SVL) is also active, and it will take
	 * care of autonomous forwarding between the unique pvid's of each
	 * port. Here we just make sure that users can't add duplicate FDB
	 * entries when in this mode - the actual VID doesn't matter except
	 * for what gets printed in 'bridge fdb show'. In the case of zero,
	 * no VID gets printed at all.
	 */
	if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
		vid = 0;

	return priv->info->fdb_add_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;

	if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
		vid = 0;

	return priv->info->fdb_del_cmd(ds, port, addr, vid);
}

static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -ENOENT)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;
		u64_to_ether_addr(l2_lookup.macaddr, macaddr);

		/* We need to hide the dsa_8021q VLANs from the user. */
		if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
			l2_lookup.vlanid = 0;
		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
	}
	return 0;
}

/* This callback needs to be present */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	return 0;
}

static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}

static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and vice versa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}

static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
		 */
		mac[port].ingress = false;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = true;
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress = true;
		mac[port].egress = true;
		mac[port].dyn_learn = true;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}

static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}

static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}

static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};

/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
int sja1105_static_config_reload(struct sja1105_private *priv,
				 enum sja1105_reset_reason reason)
{
	struct ptp_system_timestamp ptp_sts_before;
	struct ptp_system_timestamp ptp_sts_after;
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	struct dsa_switch *ds = priv->ds;
	s64 t1, t2, t3, t4;
	s64 t12, t34;
	u16 bmcr = 0;
	int rc, i;
	s64 now;

	mutex_lock(&priv->mgmt_lock);

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
	 * switch wants to see in the static config in order to allow us to
	 * change it through the dynamic interface later.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
	}

	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
		bmcr = sja1105_sgmii_read(priv, MII_BMCR);

	/* No PTP operations can run right now */
	mutex_lock(&priv->ptp_data.lock);

	rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
	if (rc < 0)
		goto out_unlock_ptp;

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out_unlock_ptp;

	rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
	if (rc < 0)
		goto out_unlock_ptp;

	t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
	t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
	t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
	t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
	/* Mid point, corresponds to pre-reset PTPCLKVAL */
	t12 = t1 + (t2 - t1) / 2;
	/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
	t34 = t3 + (t4 - t3) / 2;
	/* Advance PTPCLKVAL by the time it took since its readout */
	now += (t34 - t12);

	__sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_data.lock);

	dev_info(priv->ds->dev,
		 "Reset switch and programmed static config. Reason: %s\n",
		 sja1105_reset_reasons[reason]);

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;
	}

	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
		bool an_enabled = !!(bmcr & BMCR_ANENABLE);

		sja1105_sgmii_pcs_config(priv, an_enabled, false);

		if (!an_enabled) {
			int speed = SPEED_UNKNOWN;

			if (bmcr & BMCR_SPEED1000)
				speed = SPEED_1000;
			else if (bmcr & BMCR_SPEED100)
				speed = SPEED_100;
			else if (bmcr & BMCR_SPEED10)
				speed = SPEED_10;

			sja1105_sgmii_pcs_force_speed(priv, speed);
		}
	}
out:
	mutex_unlock(&priv->mgmt_lock);

	return rc;
}

static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}

static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
	struct sja1105_vlan_lookup_entry *vlan;
	int count, i;

	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;

	for (i = 0; i < count; i++)
		if (vlan[i].vlanid == vid)
			return i;

	/* Return an invalid entry index if not found */
	return -1;
}

static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}

static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	int rc, i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
				i, rc);
			return rc;
		}
	}
	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ? "Enabled" : "Disabled");
	return 0;
}

static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_SJA1105;
}

/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}

/* The TPID setting belongs to the General Parameters table,
 * which can only be partially reconfigured at runtime (and not the TPID).
 * So a switch reset is required.
 */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	u16 tpid, tpid2;
	int rc;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type == SJA1105_RULE_VL) {
			dev_err(ds->dev,
				"Cannot change VLAN filtering state while VL rules are active\n");
			return -EBUSY;
		}
	}

	if (enabled) {
		/* Enable VLAN filtering. */
		tpid = ETH_P_8021Q;
		tpid2 = ETH_P_8021AD;
	} else {
		/* Disable VLAN filtering. */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	/* VLAN filtering => independent VLAN learning.
	 * No VLAN filtering => shared VLAN learning.
	 *
	 * In shared VLAN learning mode, untagged traffic still gets
	 * pvid-tagged, and the FDB table gets populated with entries
	 * containing the "real" (pvid or from VLAN tag) VLAN ID.
	 * However the switch performs a masked L2 lookup in the FDB,
	 * effectively only looking up a frame's DMAC (and not VID) for the
	 * forwarding decision.
1878 * 1879 * This is extremely convenient for us, because in modes with 1880 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into 1881 * each front panel port. This is good for identification but breaks 1882 * learning badly - the VID of the learnt FDB entry is unique, aka 1883 * no frames coming from any other port are going to have it. So 1884 * for forwarding purposes, this is as though learning was broken 1885 * (all frames get flooded). 1886 */ 1887 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 1888 l2_lookup_params = table->entries; 1889 l2_lookup_params->shared_learn = !enabled; 1890 1891 rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); 1892 if (rc) 1893 dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); 1894 1895 /* Switch port identification based on 802.1Q is only passable 1896 * if we are not under a vlan_filtering bridge. So make sure 1897 * the two configurations are mutually exclusive. 1898 */ 1899 return sja1105_setup_8021q_tagging(ds, !enabled); 1900 } 1901 1902 static void sja1105_vlan_add(struct dsa_switch *ds, int port, 1903 const struct switchdev_obj_port_vlan *vlan) 1904 { 1905 struct sja1105_private *priv = ds->priv; 1906 u16 vid; 1907 int rc; 1908 1909 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1910 rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags & 1911 BRIDGE_VLAN_INFO_UNTAGGED); 1912 if (rc < 0) { 1913 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 1914 vid, port, rc); 1915 return; 1916 } 1917 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { 1918 rc = sja1105_pvid_apply(ds->priv, port, vid); 1919 if (rc < 0) { 1920 dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n", 1921 vid, port, rc); 1922 return; 1923 } 1924 } 1925 } 1926 } 1927 1928 static int sja1105_vlan_del(struct dsa_switch *ds, int port, 1929 const struct switchdev_obj_port_vlan *vlan) 1930 { 1931 struct sja1105_private *priv = ds->priv; 1932 u16 vid; 1933 int rc; 1934 1935 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 1936 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags & 1937 BRIDGE_VLAN_INFO_UNTAGGED); 1938 if (rc < 0) { 1939 dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n", 1940 vid, port, rc); 1941 return rc; 1942 } 1943 } 1944 return 0; 1945 } 1946 1947 /* The programming model for the SJA1105 switch is "all-at-once" via static 1948 * configuration tables. Some of these can be dynamically modified at runtime, 1949 * but not the xMII mode parameters table. 1950 * Furthermode, some PHYs may not have crystals for generating their clocks 1951 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's 1952 * ref_clk pin. So port clocking needs to be initialized early, before 1953 * connecting to PHYs is attempted, otherwise they won't respond through MDIO. 1954 * Setting correct PHY link speed does not matter now. 1955 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY 1956 * bindings are not yet parsed by DSA core. We need to parse early so that we 1957 * can populate the xMII mode parameters table. 1958 */ 1959 static int sja1105_setup(struct dsa_switch *ds) 1960 { 1961 struct sja1105_dt_port ports[SJA1105_NUM_PORTS]; 1962 struct sja1105_private *priv = ds->priv; 1963 int rc; 1964 1965 rc = sja1105_parse_dt(priv, ports); 1966 if (rc < 0) { 1967 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); 1968 return rc; 1969 } 1970 1971 /* Error out early if internal delays are required through DT 1972 * and we can't apply them. 
1973 */ 1974 rc = sja1105_parse_rgmii_delays(priv, ports); 1975 if (rc < 0) { 1976 dev_err(ds->dev, "RGMII delay not supported\n"); 1977 return rc; 1978 } 1979 1980 rc = sja1105_ptp_clock_register(ds); 1981 if (rc < 0) { 1982 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); 1983 return rc; 1984 } 1985 /* Create and send configuration down to device */ 1986 rc = sja1105_static_config_load(priv, ports); 1987 if (rc < 0) { 1988 dev_err(ds->dev, "Failed to load static config: %d\n", rc); 1989 return rc; 1990 } 1991 /* Configure the CGU (PHY link modes and speeds) */ 1992 rc = sja1105_clocking_setup(priv); 1993 if (rc < 0) { 1994 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc); 1995 return rc; 1996 } 1997 /* On SJA1105, VLAN filtering per se is always enabled in hardware. 1998 * The only thing we can do to disable it is lie about what the 802.1Q 1999 * EtherType is. 2000 * So it will still try to apply VLAN filtering, but all ingress 2001 * traffic (except frames received with EtherType of ETH_P_SJA1105) 2002 * will be internally tagged with a distorted VLAN header where the 2003 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. 2004 */ 2005 ds->vlan_filtering_is_global = true; 2006 2007 /* Advertise the 8 egress queues */ 2008 ds->num_tx_queues = SJA1105_NUM_TC; 2009 2010 ds->mtu_enforcement_ingress = true; 2011 2012 /* The DSA/switchdev model brings up switch ports in standalone mode by 2013 * default, and that means vlan_filtering is 0 since they're not under 2014 * a bridge, so it's safe to set up switch tagging at this time. 2015 */ 2016 return sja1105_setup_8021q_tagging(ds, true); 2017 } 2018 2019 static void sja1105_teardown(struct dsa_switch *ds) 2020 { 2021 struct sja1105_private *priv = ds->priv; 2022 int port; 2023 2024 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2025 struct sja1105_port *sp = &priv->ports[port]; 2026 2027 if (!dsa_is_user_port(ds, port)) 2028 continue; 2029 2030 if (sp->xmit_worker) 2031 kthread_destroy_worker(sp->xmit_worker); 2032 } 2033 2034 sja1105_flower_teardown(ds); 2035 sja1105_tas_teardown(ds); 2036 sja1105_ptp_clock_unregister(ds); 2037 sja1105_static_config_free(&priv->static_config); 2038 } 2039 2040 static int sja1105_port_enable(struct dsa_switch *ds, int port, 2041 struct phy_device *phy) 2042 { 2043 struct net_device *slave; 2044 2045 if (!dsa_is_user_port(ds, port)) 2046 return 0; 2047 2048 slave = dsa_to_port(ds, port)->slave; 2049 2050 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2051 2052 return 0; 2053 } 2054 2055 static void sja1105_port_disable(struct dsa_switch *ds, int port) 2056 { 2057 struct sja1105_private *priv = ds->priv; 2058 struct sja1105_port *sp = &priv->ports[port]; 2059 2060 if (!dsa_is_user_port(ds, port)) 2061 return; 2062 2063 kthread_cancel_work_sync(&sp->xmit_work); 2064 skb_queue_purge(&sp->xmit_queue); 2065 } 2066 2067 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, 2068 struct sk_buff *skb, bool takets) 2069 { 2070 struct sja1105_mgmt_entry mgmt_route = {0}; 2071 struct sja1105_private *priv = ds->priv; 2072 struct ethhdr *hdr; 2073 int timeout = 10; 2074 int rc; 2075 2076 hdr = eth_hdr(skb); 2077 2078 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); 2079 mgmt_route.destports = BIT(port); 2080 mgmt_route.enfport = 1; 2081 mgmt_route.tsreg = 0; 2082 mgmt_route.takets = takets; 2083 2084 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE, 2085 slot, &mgmt_route, true); 2086 if (rc < 0) { 2087 kfree_skb(skb); 2088 return rc; 2089 } 2090 2091 
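	/* The route installed above is effectively a one-shot rule: it
	 * matches only on the DMAC of this frame and steers it out the
	 * requested egress port. The hardware clears ENFPORT once a frame
	 * has matched, which is what the polling loop below relies on.
	 */
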
	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}

#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;

		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}

/* The MAXAGE setting belongs to the L2 Lookup Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
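 *
 * (Illustrative assumption: if SJA1105_AGEING_TIME_MS() converts from
 * milliseconds into 10 ms hardware units, the bridge layer's default
 * ageing_time of 300000 ms maps to a MAXAGE of 30000. When the requested
 * value maps to the MAXAGE already in use, the reload below is skipped.)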
2159 */ 2160 static int sja1105_set_ageing_time(struct dsa_switch *ds, 2161 unsigned int ageing_time) 2162 { 2163 struct sja1105_l2_lookup_params_entry *l2_lookup_params; 2164 struct sja1105_private *priv = ds->priv; 2165 struct sja1105_table *table; 2166 unsigned int maxage; 2167 2168 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; 2169 l2_lookup_params = table->entries; 2170 2171 maxage = SJA1105_AGEING_TIME_MS(ageing_time); 2172 2173 if (l2_lookup_params->maxage == maxage) 2174 return 0; 2175 2176 l2_lookup_params->maxage = maxage; 2177 2178 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME); 2179 } 2180 2181 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 2182 { 2183 struct sja1105_l2_policing_entry *policing; 2184 struct sja1105_private *priv = ds->priv; 2185 2186 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN; 2187 2188 if (dsa_is_cpu_port(ds, port)) 2189 new_mtu += VLAN_HLEN; 2190 2191 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2192 2193 if (policing[port].maxlen == new_mtu) 2194 return 0; 2195 2196 policing[port].maxlen = new_mtu; 2197 2198 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2199 } 2200 2201 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port) 2202 { 2203 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; 2204 } 2205 2206 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, 2207 enum tc_setup_type type, 2208 void *type_data) 2209 { 2210 switch (type) { 2211 case TC_SETUP_QDISC_TAPRIO: 2212 return sja1105_setup_tc_taprio(ds, port, type_data); 2213 default: 2214 return -EOPNOTSUPP; 2215 } 2216 } 2217 2218 /* We have a single mirror (@to) port, but can configure ingress and egress 2219 * mirroring on all other (@from) ports. 2220 * We need to allow mirroring rules only as long as the @to port is always the 2221 * same, and we need to unset the @to port from mirr_port only when there is no 2222 * mirroring rule that references it. 2223 */ 2224 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to, 2225 bool ingress, bool enabled) 2226 { 2227 struct sja1105_general_params_entry *general_params; 2228 struct sja1105_mac_config_entry *mac; 2229 struct sja1105_table *table; 2230 bool already_enabled; 2231 u64 new_mirr_port; 2232 int rc; 2233 2234 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 2235 general_params = table->entries; 2236 2237 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 2238 2239 already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS); 2240 if (already_enabled && enabled && general_params->mirr_port != to) { 2241 dev_err(priv->ds->dev, 2242 "Delete mirroring rules towards port %llu first\n", 2243 general_params->mirr_port); 2244 return -EBUSY; 2245 } 2246 2247 new_mirr_port = to; 2248 if (!enabled) { 2249 bool keep = false; 2250 int port; 2251 2252 /* Anybody still referencing mirr_port? 
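	 * An out-of-range value (SJA1105_NUM_PORTS) doubles as the "no mirror
	 * port configured" sentinel: it is what the already_enabled check
	 * above tests for, and what gets written back once the last rule
	 * referencing the mirror port is gone.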
*/ 2253 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2254 if (mac[port].ing_mirr || mac[port].egr_mirr) { 2255 keep = true; 2256 break; 2257 } 2258 } 2259 /* Unset already_enabled for next time */ 2260 if (!keep) 2261 new_mirr_port = SJA1105_NUM_PORTS; 2262 } 2263 if (new_mirr_port != general_params->mirr_port) { 2264 general_params->mirr_port = new_mirr_port; 2265 2266 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS, 2267 0, general_params, true); 2268 if (rc < 0) 2269 return rc; 2270 } 2271 2272 if (ingress) 2273 mac[from].ing_mirr = enabled; 2274 else 2275 mac[from].egr_mirr = enabled; 2276 2277 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from, 2278 &mac[from], true); 2279 } 2280 2281 static int sja1105_mirror_add(struct dsa_switch *ds, int port, 2282 struct dsa_mall_mirror_tc_entry *mirror, 2283 bool ingress) 2284 { 2285 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, 2286 ingress, true); 2287 } 2288 2289 static void sja1105_mirror_del(struct dsa_switch *ds, int port, 2290 struct dsa_mall_mirror_tc_entry *mirror) 2291 { 2292 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, 2293 mirror->ingress, false); 2294 } 2295 2296 static int sja1105_port_policer_add(struct dsa_switch *ds, int port, 2297 struct dsa_mall_policer_tc_entry *policer) 2298 { 2299 struct sja1105_l2_policing_entry *policing; 2300 struct sja1105_private *priv = ds->priv; 2301 2302 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2303 2304 /* In hardware, every 8 microseconds the credit level is incremented by 2305 * the value of RATE bytes divided by 64, up to a maximum of SMAX 2306 * bytes. 2307 */ 2308 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, 2309 1000000); 2310 policing[port].smax = div_u64(policer->rate_bytes_per_sec * 2311 PSCHED_NS2TICKS(policer->burst), 2312 PSCHED_TICKS_PER_SEC); 2313 2314 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2315 } 2316 2317 static void sja1105_port_policer_del(struct dsa_switch *ds, int port) 2318 { 2319 struct sja1105_l2_policing_entry *policing; 2320 struct sja1105_private *priv = ds->priv; 2321 2322 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; 2323 2324 policing[port].rate = SJA1105_RATE_MBPS(1000); 2325 policing[port].smax = 65535; 2326 2327 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); 2328 } 2329 2330 static const struct dsa_switch_ops sja1105_switch_ops = { 2331 .get_tag_protocol = sja1105_get_tag_protocol, 2332 .setup = sja1105_setup, 2333 .teardown = sja1105_teardown, 2334 .set_ageing_time = sja1105_set_ageing_time, 2335 .port_change_mtu = sja1105_change_mtu, 2336 .port_max_mtu = sja1105_get_max_mtu, 2337 .phylink_validate = sja1105_phylink_validate, 2338 .phylink_mac_link_state = sja1105_mac_pcs_get_state, 2339 .phylink_mac_config = sja1105_mac_config, 2340 .phylink_mac_link_up = sja1105_mac_link_up, 2341 .phylink_mac_link_down = sja1105_mac_link_down, 2342 .get_strings = sja1105_get_strings, 2343 .get_ethtool_stats = sja1105_get_ethtool_stats, 2344 .get_sset_count = sja1105_get_sset_count, 2345 .get_ts_info = sja1105_get_ts_info, 2346 .port_enable = sja1105_port_enable, 2347 .port_disable = sja1105_port_disable, 2348 .port_fdb_dump = sja1105_fdb_dump, 2349 .port_fdb_add = sja1105_fdb_add, 2350 .port_fdb_del = sja1105_fdb_del, 2351 .port_bridge_join = sja1105_bridge_join, 2352 .port_bridge_leave = sja1105_bridge_leave, 2353 .port_stp_state_set = sja1105_bridge_stp_state_set, 2354 .port_vlan_prepare 
= sja1105_vlan_prepare, 2355 .port_vlan_filtering = sja1105_vlan_filtering, 2356 .port_vlan_add = sja1105_vlan_add, 2357 .port_vlan_del = sja1105_vlan_del, 2358 .port_mdb_prepare = sja1105_mdb_prepare, 2359 .port_mdb_add = sja1105_mdb_add, 2360 .port_mdb_del = sja1105_mdb_del, 2361 .port_hwtstamp_get = sja1105_hwtstamp_get, 2362 .port_hwtstamp_set = sja1105_hwtstamp_set, 2363 .port_rxtstamp = sja1105_port_rxtstamp, 2364 .port_txtstamp = sja1105_port_txtstamp, 2365 .port_setup_tc = sja1105_port_setup_tc, 2366 .port_mirror_add = sja1105_mirror_add, 2367 .port_mirror_del = sja1105_mirror_del, 2368 .port_policer_add = sja1105_port_policer_add, 2369 .port_policer_del = sja1105_port_policer_del, 2370 .cls_flower_add = sja1105_cls_flower_add, 2371 .cls_flower_del = sja1105_cls_flower_del, 2372 .cls_flower_stats = sja1105_cls_flower_stats, 2373 }; 2374 2375 static int sja1105_check_device_id(struct sja1105_private *priv) 2376 { 2377 const struct sja1105_regs *regs = priv->info->regs; 2378 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; 2379 struct device *dev = &priv->spidev->dev; 2380 u32 device_id; 2381 u64 part_no; 2382 int rc; 2383 2384 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, 2385 NULL); 2386 if (rc < 0) 2387 return rc; 2388 2389 if (device_id != priv->info->device_id) { 2390 dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n", 2391 priv->info->device_id, device_id); 2392 return -ENODEV; 2393 } 2394 2395 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, 2396 SJA1105_SIZE_DEVICE_ID); 2397 if (rc < 0) 2398 return rc; 2399 2400 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); 2401 2402 if (part_no != priv->info->part_no) { 2403 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", 2404 priv->info->part_no, part_no); 2405 return -ENODEV; 2406 } 2407 2408 return 0; 2409 } 2410 2411 static int sja1105_probe(struct spi_device *spi) 2412 { 2413 struct sja1105_tagger_data *tagger_data; 2414 struct device *dev = &spi->dev; 2415 struct sja1105_private *priv; 2416 struct dsa_switch *ds; 2417 int rc, port; 2418 2419 if (!dev->of_node) { 2420 dev_err(dev, "No DTS bindings for SJA1105 driver\n"); 2421 return -EINVAL; 2422 } 2423 2424 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); 2425 if (!priv) 2426 return -ENOMEM; 2427 2428 /* Configure the optional reset pin and bring up switch */ 2429 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 2430 if (IS_ERR(priv->reset_gpio)) 2431 dev_dbg(dev, "reset-gpios not defined, ignoring\n"); 2432 else 2433 sja1105_hw_reset(priv->reset_gpio, 1, 1); 2434 2435 /* Populate our driver private structure (priv) based on 2436 * the device tree node that was probed (spi) 2437 */ 2438 priv->spidev = spi; 2439 spi_set_drvdata(spi, priv); 2440 2441 /* Configure the SPI bus */ 2442 spi->bits_per_word = 8; 2443 rc = spi_setup(spi); 2444 if (rc < 0) { 2445 dev_err(dev, "Could not init SPI\n"); 2446 return rc; 2447 } 2448 2449 priv->info = of_device_get_match_data(dev); 2450 2451 /* Detect hardware device */ 2452 rc = sja1105_check_device_id(priv); 2453 if (rc < 0) { 2454 dev_err(dev, "Device ID check failed: %d\n", rc); 2455 return rc; 2456 } 2457 2458 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); 2459 2460 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); 2461 if (!ds) 2462 return -ENOMEM; 2463 2464 ds->dev = dev; 2465 ds->num_ports = SJA1105_NUM_PORTS; 2466 ds->ops = &sja1105_switch_ops; 2467 ds->priv = priv; 2468 priv->ds = ds; 2469 2470 tagger_data = 
&priv->tagger_data; 2471 2472 mutex_init(&priv->ptp_data.lock); 2473 mutex_init(&priv->mgmt_lock); 2474 2475 sja1105_tas_setup(ds); 2476 sja1105_flower_setup(ds); 2477 2478 rc = dsa_register_switch(priv->ds); 2479 if (rc) 2480 return rc; 2481 2482 /* Connections between dsa_port and sja1105_port */ 2483 for (port = 0; port < SJA1105_NUM_PORTS; port++) { 2484 struct sja1105_port *sp = &priv->ports[port]; 2485 struct dsa_port *dp = dsa_to_port(ds, port); 2486 struct net_device *slave; 2487 2488 if (!dsa_is_user_port(ds, port)) 2489 continue; 2490 2491 dp->priv = sp; 2492 sp->dp = dp; 2493 sp->data = tagger_data; 2494 slave = dp->slave; 2495 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); 2496 sp->xmit_worker = kthread_create_worker(0, "%s_xmit", 2497 slave->name); 2498 if (IS_ERR(sp->xmit_worker)) { 2499 rc = PTR_ERR(sp->xmit_worker); 2500 dev_err(ds->dev, 2501 "failed to create deferred xmit thread: %d\n", 2502 rc); 2503 goto out; 2504 } 2505 skb_queue_head_init(&sp->xmit_queue); 2506 } 2507 2508 return 0; 2509 out: 2510 while (port-- > 0) { 2511 struct sja1105_port *sp = &priv->ports[port]; 2512 2513 if (!dsa_is_user_port(ds, port)) 2514 continue; 2515 2516 kthread_destroy_worker(sp->xmit_worker); 2517 } 2518 return rc; 2519 } 2520 2521 static int sja1105_remove(struct spi_device *spi) 2522 { 2523 struct sja1105_private *priv = spi_get_drvdata(spi); 2524 2525 dsa_unregister_switch(priv->ds); 2526 return 0; 2527 } 2528 2529 static const struct of_device_id sja1105_dt_ids[] = { 2530 { .compatible = "nxp,sja1105e", .data = &sja1105e_info }, 2531 { .compatible = "nxp,sja1105t", .data = &sja1105t_info }, 2532 { .compatible = "nxp,sja1105p", .data = &sja1105p_info }, 2533 { .compatible = "nxp,sja1105q", .data = &sja1105q_info }, 2534 { .compatible = "nxp,sja1105r", .data = &sja1105r_info }, 2535 { .compatible = "nxp,sja1105s", .data = &sja1105s_info }, 2536 { /* sentinel */ }, 2537 }; 2538 MODULE_DEVICE_TABLE(of, sja1105_dt_ids); 2539 2540 static struct spi_driver sja1105_driver = { 2541 .driver = { 2542 .name = "sja1105", 2543 .owner = THIS_MODULE, 2544 .of_match_table = of_match_ptr(sja1105_dt_ids), 2545 }, 2546 .probe = sja1105_probe, 2547 .remove = sja1105_remove, 2548 }; 2549 2550 module_spi_driver(sja1105_driver); 2551 2552 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>"); 2553 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>"); 2554 MODULE_DESCRIPTION("SJA1105 Driver"); 2555 MODULE_LICENSE("GPL v2"); 2556