1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
18 #include <linux/pcs/pcs-xpcs.h>
77 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_is_vlan_configured()
78 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; in sja1105_is_vlan_configured()
85 return -1; in sja1105_is_vlan_configured()
90 struct sja1105_private *priv = ds->priv; in sja1105_drop_untagged()
93 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_drop_untagged()
108 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_pvid_apply()
123 struct sja1105_private *priv = ds->priv; in sja1105_commit_pvid()
130 pvid = priv->bridge_pvid[port]; in sja1105_commit_pvid()
132 pvid = priv->tag_8021q_pvid[port]; in sja1105_commit_pvid()
139 * VLAN-aware bridge. When the tag_8021q pvid is used, we are in sja1105_commit_pvid()
145 if (pvid == priv->bridge_pvid[port]) { in sja1105_commit_pvid()
146 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_commit_pvid()
164 * Every queue i holds top[i] - base[i] frames. in sja1105_init_mac_settings()
165 * Sum of top[i] - base[i] is 511 (max hardware limit). in sja1105_init_mac_settings()
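 * (For example, an even split over the 8 traffic classes would be
 * base = {0, 64, 128, ..., 448} and top = {63, 127, 191, ..., 511}.)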
175 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO], in sja1105_init_mac_settings()
176 /* No static correction for 1-step 1588 events */ in sja1105_init_mac_settings()
188 /* Don't drop double-tagged traffic */ in sja1105_init_mac_settings()
194 /* Disable learning and I/O on user ports by default - in sja1105_init_mac_settings()
202 struct dsa_switch *ds = priv->ds; in sja1105_init_mac_settings()
206 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; in sja1105_init_mac_settings()
209 if (table->entry_count) { in sja1105_init_mac_settings()
210 kfree(table->entries); in sja1105_init_mac_settings()
211 table->entry_count = 0; in sja1105_init_mac_settings()
214 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mac_settings()
215 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mac_settings()
216 if (!table->entries) in sja1105_init_mac_settings()
217 return -ENOMEM; in sja1105_init_mac_settings()
219 table->entry_count = table->ops->max_entry_count; in sja1105_init_mac_settings()
221 mac = table->entries; in sja1105_init_mac_settings()
223 list_for_each_entry(dp, &ds->dst->ports, list) { in sja1105_init_mac_settings()
224 if (dp->ds != ds) in sja1105_init_mac_settings()
227 mac[dp->index] = default_mac; in sja1105_init_mac_settings()
230 * enabled for the DSA ports. CPU ports use software-assisted in sja1105_init_mac_settings()
233 * CPU ports in a cross-chip topology if multiple CPU ports in sja1105_init_mac_settings()
237 dp->learning = true; in sja1105_init_mac_settings()
243 mac[dp->index].drpuntag = true; in sja1105_init_mac_settings()
251 struct device *dev = &priv->spidev->dev; in sja1105_init_mii_settings()
253 struct dsa_switch *ds = priv->ds; in sja1105_init_mii_settings()
257 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; in sja1105_init_mii_settings()
260 if (table->entry_count) { in sja1105_init_mii_settings()
261 kfree(table->entries); in sja1105_init_mii_settings()
262 table->entry_count = 0; in sja1105_init_mii_settings()
265 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mii_settings()
266 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mii_settings()
267 if (!table->entries) in sja1105_init_mii_settings()
268 return -ENOMEM; in sja1105_init_mii_settings()
271 table->entry_count = table->ops->max_entry_count; in sja1105_init_mii_settings()
273 mii = table->entries; in sja1105_init_mii_settings()
275 for (i = 0; i < ds->num_ports; i++) { in sja1105_init_mii_settings()
278 if (dsa_is_unused_port(priv->ds, i)) in sja1105_init_mii_settings()
281 switch (priv->phy_mode[i]) { in sja1105_init_mii_settings()
283 if (priv->info->internal_phy[i] == SJA1105_NO_PHY) in sja1105_init_mii_settings()
286 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
287 if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX) in sja1105_init_mii_settings()
288 mii->special[i] = true; in sja1105_init_mii_settings()
295 if (!priv->info->supports_mii[i]) in sja1105_init_mii_settings()
298 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
304 if (!priv->info->supports_rmii[i]) in sja1105_init_mii_settings()
307 mii->xmii_mode[i] = XMII_MODE_RMII; in sja1105_init_mii_settings()
313 if (!priv->info->supports_rgmii[i]) in sja1105_init_mii_settings()
316 mii->xmii_mode[i] = XMII_MODE_RGMII; in sja1105_init_mii_settings()
319 if (!priv->info->supports_sgmii[i]) in sja1105_init_mii_settings()
322 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
323 mii->special[i] = true; in sja1105_init_mii_settings()
326 if (!priv->info->supports_2500basex[i]) in sja1105_init_mii_settings()
329 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
330 mii->special[i] = true; in sja1105_init_mii_settings()
335 phy_modes(priv->phy_mode[i]), i); in sja1105_init_mii_settings()
336 return -EINVAL; in sja1105_init_mii_settings()
339 mii->phy_mac[i] = role; in sja1105_init_mii_settings()
350 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_init_static_fdb()
353 * entries, except for a special entry at the end which is a catch-all in sja1105_init_static_fdb()
356 if (table->entry_count) { in sja1105_init_static_fdb()
357 kfree(table->entries); in sja1105_init_static_fdb()
358 table->entry_count = 0; in sja1105_init_static_fdb()
361 if (!priv->info->can_limit_mcast_flood) in sja1105_init_static_fdb()
364 table->entries = kcalloc(1, table->ops->unpacked_entry_size, in sja1105_init_static_fdb()
366 if (!table->entries) in sja1105_init_static_fdb()
367 return -ENOMEM; in sja1105_init_static_fdb()
369 table->entry_count = 1; in sja1105_init_static_fdb()
370 l2_lookup = table->entries; in sja1105_init_static_fdb()
376 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1; in sja1105_init_static_fdb()
379 for (port = 0; port < priv->ds->num_ports; port++) in sja1105_init_static_fdb()
380 if (!dsa_is_unused_port(priv->ds, port)) in sja1105_init_static_fdb()
399 /* Don't discard management traffic based on ENFPORT - in sja1105_init_l2_lookup_params()
416 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_lookup_params()
421 for (port = 0; port < ds->num_ports; port++) in sja1105_init_l2_lookup_params()
427 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_lookup_params()
434 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_init_l2_lookup_params()
436 if (table->entry_count) { in sja1105_init_l2_lookup_params()
437 kfree(table->entries); in sja1105_init_l2_lookup_params()
438 table->entry_count = 0; in sja1105_init_l2_lookup_params()
441 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_lookup_params()
442 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_lookup_params()
443 if (!table->entries) in sja1105_init_l2_lookup_params()
444 return -ENOMEM; in sja1105_init_l2_lookup_params()
446 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_lookup_params()
449 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = in sja1105_init_l2_lookup_params()
457 * All DT-defined ports are members of this VLAN, and there are no
474 struct dsa_switch *ds = priv->ds; in sja1105_init_static_vlan()
477 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_init_static_vlan()
479 if (table->entry_count) { in sja1105_init_static_vlan()
480 kfree(table->entries); in sja1105_init_static_vlan()
481 table->entry_count = 0; in sja1105_init_static_vlan()
484 table->entries = kzalloc(table->ops->unpacked_entry_size, in sja1105_init_static_vlan()
486 if (!table->entries) in sja1105_init_static_vlan()
487 return -ENOMEM; in sja1105_init_static_vlan()
489 table->entry_count = 1; in sja1105_init_static_vlan()
491 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_static_vlan()
500 priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
501 priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
505 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; in sja1105_init_static_vlan()
512 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_forwarding()
519 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; in sja1105_init_l2_forwarding()
521 if (table->entry_count) { in sja1105_init_l2_forwarding()
522 kfree(table->entries); in sja1105_init_l2_forwarding()
523 table->entry_count = 0; in sja1105_init_l2_forwarding()
526 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding()
527 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding()
528 if (!table->entries) in sja1105_init_l2_forwarding()
529 return -ENOMEM; in sja1105_init_l2_forwarding()
531 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding()
533 l2fwd = table->entries; in sja1105_init_l2_forwarding()
539 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
548 * only to the always-on domain (CPU port and DSA links) in sja1105_init_l2_forwarding()
550 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
554 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
567 * always-on domain). These can send packets to any enabled port except in sja1105_init_l2_forwarding()
570 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
574 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
589 * another switch which also has its own CPU port), TX packets can loop in sja1105_init_l2_forwarding()
594 * stack termination. in sja1105_init_l2_forwarding()
596 dst = ds->dst; in sja1105_init_l2_forwarding()
598 list_for_each_entry(dl, &dst->rtable, list) { in sja1105_init_l2_forwarding()
599 if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) in sja1105_init_l2_forwarding()
602 from = dl->dp->index; in sja1105_init_l2_forwarding()
605 dev_warn(ds->dev, in sja1105_init_l2_forwarding()
606 "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n", in sja1105_init_l2_forwarding()
618 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
622 priv->ucast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
623 priv->bcast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
627 * Create a one-to-one mapping. in sja1105_init_l2_forwarding()
630 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
634 l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; in sja1105_init_l2_forwarding()
637 l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; in sja1105_init_l2_forwarding()
646 struct dsa_switch *ds = priv->ds; in sja1110_init_pcp_remapping()
650 table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING]; in sja1110_init_pcp_remapping()
653 if (!table->ops->max_entry_count) in sja1110_init_pcp_remapping()
656 if (table->entry_count) { in sja1110_init_pcp_remapping()
657 kfree(table->entries); in sja1110_init_pcp_remapping()
658 table->entry_count = 0; in sja1110_init_pcp_remapping()
661 table->entries = kcalloc(table->ops->max_entry_count, in sja1110_init_pcp_remapping()
662 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1110_init_pcp_remapping()
663 if (!table->entries) in sja1110_init_pcp_remapping()
664 return -ENOMEM; in sja1110_init_pcp_remapping()
666 table->entry_count = table->ops->max_entry_count; in sja1110_init_pcp_remapping()
668 pcp_remap = table->entries; in sja1110_init_pcp_remapping()
671 for (port = 0; port < ds->num_ports; port++) { in sja1110_init_pcp_remapping()
687 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_init_l2_forwarding_params()
689 if (table->entry_count) { in sja1105_init_l2_forwarding_params()
690 kfree(table->entries); in sja1105_init_l2_forwarding_params()
691 table->entry_count = 0; in sja1105_init_l2_forwarding_params()
694 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding_params()
695 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding_params()
696 if (!table->entries) in sja1105_init_l2_forwarding_params()
697 return -ENOMEM; in sja1105_init_l2_forwarding_params()
699 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding_params()
702 l2fwd_params = table->entries; in sja1105_init_l2_forwarding_params()
705 l2fwd_params->max_dynp = 0; in sja1105_init_l2_forwarding_params()
707 l2fwd_params->part_spc[0] = priv->info->max_frame_mem; in sja1105_init_l2_forwarding_params()
718 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
719 l2_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
720 l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
722 /* If we have any critical-traffic virtual links, we need to reserve in sja1105_frame_memory_partitioning()
725 * remaining for best-effort traffic. TODO: figure out a more flexible in sja1105_frame_memory_partitioning()
728 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count) in sja1105_frame_memory_partitioning()
731 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
732 vl_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
734 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
735 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
 * TDMACONFIGIDX | 100 Mbps ports |  1 Gbps ports | 2.5 Gbps ports| Disabled ports
741 * -----+----------------+---------------+---------------+---------------
743 * 1 |0, [5:10], retag| [1:2] | [3:4] | -
744 * 2 | 0, [5:10] | [1:3], retag | 4 | -
745 * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
746 * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
747 * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
748 * 14 | 0, [5:10] | [1:4], retag | - | -
749 * 15 | [5:10] | [0:4], retag | - | -
760 if (priv->info->device_id != SJA1110_DEVICE_ID) in sja1110_select_tdmaconfigidx()
763 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1110_select_tdmaconfigidx()
764 general_params = table->entries; in sja1110_select_tdmaconfigidx()
769 port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL; in sja1110_select_tdmaconfigidx()
770 port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
771 port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
789 general_params->tdmaconfigidx = tdmaconfigidx; in sja1110_select_tdmaconfigidx()
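
/* Illustrative only: one mapping from the PHY-mode booleans above to a
 * TDMACONFIGIDX value that is consistent with the table rows shown earlier.
 * The driver's actual selection logic is not part of this listing and may be
 * structured differently.
 */
	if (port_1_is_base_tx && port_3_is_2500 && port_4_is_2500)
		tdmaconfigidx = 5;	/* port 1 at 100 Mbps, ports 3-4 at 2.5 Gbps */
	else if (port_3_is_2500 && port_4_is_2500)
		tdmaconfigidx = 1;	/* ports 3-4 at 2.5 Gbps */
	else if (port_3_is_2500)
		tdmaconfigidx = 3;	/* only port 3 at 2.5 Gbps */
	else if (port_4_is_2500)
		tdmaconfigidx = 2;	/* only port 4 at 2.5 Gbps */
	else
		tdmaconfigidx = 14;	/* no 2.5 Gbps ports */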
795 struct dsa_switch *ds = priv->ds; in sja1105_init_topology()
802 general_params->host_port = ds->num_ports; in sja1105_init_topology()
804 /* Link-local traffic received on casc_port will be forwarded in sja1105_init_topology()
814 if (!priv->info->multiple_cascade_ports) in sja1105_init_topology()
815 general_params->casc_port = ds->num_ports; in sja1105_init_topology()
817 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_topology()
822 * upstream-facing DSA links in sja1105_init_topology()
825 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
826 general_params->host_port = port; in sja1105_init_topology()
828 dev_err(ds->dev, in sja1105_init_topology()
830 general_params->host_port, port); in sja1105_init_topology()
831 return -EINVAL; in sja1105_init_topology()
835 /* Cascade ports are downstream-facing DSA links */ in sja1105_init_topology()
837 if (priv->info->multiple_cascade_ports) { in sja1105_init_topology()
838 general_params->casc_port |= BIT(port); in sja1105_init_topology()
839 } else if (general_params->casc_port == ds->num_ports) { in sja1105_init_topology()
840 general_params->casc_port = port; in sja1105_init_topology()
842 dev_err(ds->dev, in sja1105_init_topology()
844 general_params->casc_port, port); in sja1105_init_topology()
845 return -EINVAL; in sja1105_init_topology()
850 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
851 dev_err(ds->dev, "No host port configured\n"); in sja1105_init_topology()
852 return -EINVAL; in sja1105_init_topology()
863 .switchid = priv->ds->index, in sja1105_init_general_params()
864 /* Priority queue for link-local management frames in sja1105_init_general_params()
865 * (both ingress to and egress from CPU - PTP, STP etc) in sja1105_init_general_params()
877 .mirr_port = priv->ds->num_ports, in sja1105_init_general_params()
882 /* Only update correctionField for 1-step PTP (L2 transport) */ in sja1105_init_general_params()
902 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_init_general_params()
904 if (table->entry_count) { in sja1105_init_general_params()
905 kfree(table->entries); in sja1105_init_general_params()
906 table->entry_count = 0; in sja1105_init_general_params()
909 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_general_params()
910 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_general_params()
911 if (!table->entries) in sja1105_init_general_params()
912 return -ENOMEM; in sja1105_init_general_params()
914 table->entry_count = table->ops->max_entry_count; in sja1105_init_general_params()
916 general_params = table->entries; in sja1105_init_general_params()
931 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; in sja1105_init_avb_params()
934 if (table->entry_count) { in sja1105_init_avb_params()
935 kfree(table->entries); in sja1105_init_avb_params()
936 table->entry_count = 0; in sja1105_init_avb_params()
939 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_avb_params()
940 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_avb_params()
941 if (!table->entries) in sja1105_init_avb_params()
942 return -ENOMEM; in sja1105_init_avb_params()
944 table->entry_count = table->ops->max_entry_count; in sja1105_init_avb_params()
946 avb = table->entries; in sja1105_init_avb_params()
949 avb->destmeta = SJA1105_META_DMAC; in sja1105_init_avb_params()
950 avb->srcmeta = SJA1105_META_SMAC; in sja1105_init_avb_params()
958 avb->cas_master = false; in sja1105_init_avb_params()
963 /* The L2 policing table is 2-stage. The table is looked up for each frame
 *  [two-stage lookup diagram: Stage 1 entries (one per ingress port and
 *   traffic class, plus one broadcast entry per port) each select the
 *   Stage 2 policer entry that is applied to the frame]
999 * In this driver, we shall use policers 0-4 as statically allocated port
1011 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_policing()
1015 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; in sja1105_init_l2_policing()
1018 if (table->entry_count) { in sja1105_init_l2_policing()
1019 kfree(table->entries); in sja1105_init_l2_policing()
1020 table->entry_count = 0; in sja1105_init_l2_policing()
1023 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_policing()
1024 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_policing()
1025 if (!table->entries) in sja1105_init_l2_policing()
1026 return -ENOMEM; in sja1105_init_l2_policing()
1028 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_policing()
1030 policing = table->entries; in sja1105_init_l2_policing()
1033 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1034 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port; in sja1105_init_l2_policing()
1035 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port; in sja1105_init_l2_policing()
1042 if (mcast < table->ops->max_entry_count) in sja1105_init_l2_policing()
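	/* For example, on a 5-port SJA1105 with 8 traffic classes, the
	 * broadcast policer for port p sits at index 5 * 8 + p = 40 + p and
	 * the multicast one at 5 * 9 + p = 45 + p; the bounds check above
	 * guards switch families whose policing table is too small to hold
	 * the per-port multicast entries.
	 */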
1047 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1066 sja1105_static_config_free(&priv->static_config); in sja1105_static_config_load()
1067 rc = sja1105_static_config_init(&priv->static_config, in sja1105_static_config_load()
1068 priv->info->static_ops, in sja1105_static_config_load()
1069 priv->info->device_id); in sja1105_static_config_load()
1113 * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
1118 * Previously we were acting upon the "phy-mode" property when we were
1119 * operating in fixed-link, basically acting as a PHY, but with a reversed
1122 * TX direction. So if anything, RX delays should have been added by the MAC,
1123 * but we were adding TX delays.
1125 * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
1126 * back to the legacy behavior and apply delays on fixed-link ports based on
1127 * the reverse interpretation of the phy-mode. This is a deviation from the
1130 * "{rx,tx}-internal-delay-ps" with a value of 0.
1135 phy_interface_t phy_mode = priv->phy_mode[port]; in sja1105_parse_rgmii_delays()
1136 struct device *dev = &priv->spidev->dev; in sja1105_parse_rgmii_delays()
1137 int rx_delay = -1, tx_delay = -1; in sja1105_parse_rgmii_delays()
1142 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in sja1105_parse_rgmii_delays()
1143 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in sja1105_parse_rgmii_delays()
1145 if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { in sja1105_parse_rgmii_delays()
1147 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in sja1105_parse_rgmii_delays()
1148 "please update device tree to specify \"rx-internal-delay-ps\" and " in sja1105_parse_rgmii_delays()
1149 "\"tx-internal-delay-ps\"", in sja1105_parse_rgmii_delays()
1166 if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { in sja1105_parse_rgmii_delays()
1168 return -EINVAL; in sja1105_parse_rgmii_delays()
1178 return -ERANGE; in sja1105_parse_rgmii_delays()
1181 priv->rgmii_rx_delay_ps[port] = rx_delay; in sja1105_parse_rgmii_delays()
1182 priv->rgmii_tx_delay_ps[port] = tx_delay; in sja1105_parse_rgmii_delays()
1190 struct device *dev = &priv->spidev->dev; in sja1105_parse_ports_node()
1202 return -ENODEV; in sja1105_parse_ports_node()
1208 dev_err(dev, "Failed to read phy-mode or " in sja1105_parse_ports_node()
1209 "phy-interface-type property for port %d\n", in sja1105_parse_ports_node()
1211 return -ENODEV; in sja1105_parse_ports_node()
1214 phy_node = of_parse_phandle(child, "phy-handle", 0); in sja1105_parse_ports_node()
1217 dev_err(dev, "phy-handle or fixed-link " in sja1105_parse_ports_node()
1219 return -ENODEV; in sja1105_parse_ports_node()
1221 /* phy-handle is missing, but fixed-link isn't. in sja1105_parse_ports_node()
1224 priv->fixed_link[index] = true; in sja1105_parse_ports_node()
1229 priv->phy_mode[index] = phy_mode; in sja1105_parse_ports_node()
1241 struct device *dev = &priv->spidev->dev; in sja1105_parse_dt()
1242 struct device_node *switch_node = dev->of_node; in sja1105_parse_dt()
1248 ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); in sja1105_parse_dt()
1251 return -ENODEV; in sja1105_parse_dt()
1264 if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) in sja1105_port_speed_to_ethtool()
1266 if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) in sja1105_port_speed_to_ethtool()
1268 if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) in sja1105_port_speed_to_ethtool()
1270 if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS]) in sja1105_port_speed_to_ethtool()
1280 struct device *dev = priv->ds->dev; in sja1105_adjust_port_config()
1290 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_adjust_port_config()
1295 * the state->interface, but AN has not completed and the in sja1105_adjust_port_config()
1298 * ok for power consumption in case AN will never complete - in sja1105_adjust_port_config()
1301 speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_adjust_port_config()
1304 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS]; in sja1105_adjust_port_config()
1307 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS]; in sja1105_adjust_port_config()
1310 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1313 speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1317 return -EINVAL; in sja1105_adjust_port_config()
1327 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) in sja1105_adjust_port_config()
1328 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_adjust_port_config()
1329 else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_adjust_port_config()
1330 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_adjust_port_config()
1348 if (!phy_interface_mode_is_rgmii(priv->phy_mode[port])) in sja1105_adjust_port_config()
1358 struct sja1105_private *priv = dp->ds->priv; in sja1105_mac_select_pcs()
1359 struct dw_xpcs *xpcs = priv->xpcs[dp->index]; in sja1105_mac_select_pcs()
1362 return &xpcs->pcs; in sja1105_mac_select_pcs()
1379 sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true); in sja1105_mac_link_down()
1390 struct sja1105_private *priv = dp->ds->priv; in sja1105_mac_link_up()
1391 int port = dp->index; in sja1105_mac_link_up()
1401 struct sja1105_private *priv = ds->priv; in sja1105_phylink_get_caps()
1405 phy_mode = priv->phy_mode[port]; in sja1105_phylink_get_caps()
1410 * changes between SGMII and 2500base-X. in sja1105_phylink_get_caps()
1412 if (priv->info->supports_sgmii[port]) in sja1105_phylink_get_caps()
1414 config->supported_interfaces); in sja1105_phylink_get_caps()
1416 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1418 config->supported_interfaces); in sja1105_phylink_get_caps()
1424 __set_bit(phy_mode, config->supported_interfaces); in sja1105_phylink_get_caps()
1428 * support half-duplex traffic modes. in sja1105_phylink_get_caps()
1430 config->mac_capabilities = MAC_10FD | MAC_100FD; in sja1105_phylink_get_caps()
1432 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; in sja1105_phylink_get_caps()
1433 if (mii->xmii_mode[port] == XMII_MODE_RGMII || in sja1105_phylink_get_caps()
1434 mii->xmii_mode[port] == XMII_MODE_SGMII) in sja1105_phylink_get_caps()
1435 config->mac_capabilities |= MAC_1000FD; in sja1105_phylink_get_caps()
1437 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1438 config->mac_capabilities |= MAC_2500FD; in sja1105_phylink_get_caps()
1449 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_find_static_fdb_entry()
1450 l2_lookup = table->entries; in sja1105_find_static_fdb_entry()
1452 for (i = 0; i < table->entry_count; i++) in sja1105_find_static_fdb_entry()
1453 if (l2_lookup[i].macaddr == requested->macaddr && in sja1105_find_static_fdb_entry()
1454 l2_lookup[i].vlanid == requested->vlanid && in sja1105_find_static_fdb_entry()
1458 return -1; in sja1105_find_static_fdb_entry()
1475 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_static_fdb_change()
1484 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_static_fdb_change()
1488 match = table->entry_count - 1; in sja1105_static_fdb_change()
1492 l2_lookup = table->entries; in sja1105_static_fdb_change()
1508 l2_lookup[match] = l2_lookup[table->entry_count - 1]; in sja1105_static_fdb_change()
1509 return sja1105_table_resize(table, table->entry_count - 1); in sja1105_static_fdb_change()
1512 /* First-generation switches have a 4-way set associative TCAM that
1552 return -1; in sja1105et_is_fdb_entry_in_bin()
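	/* With a 4-way set associative layout, each hash bucket ("bin") holds
	 * four entries ("ways"), so a (bin, way) pair corresponds to the flat
	 * TCAM index bin * 4 + way.
	 */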
1559 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_add()
1560 struct device *dev = ds->dev; in sja1105et_fdb_add()
1561 int last_unused = -1; in sja1105et_fdb_add()
1594 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly in sja1105et_fdb_add()
1621 if (rc == -ENOENT) in sja1105et_fdb_add()
1644 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_del()
1679 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_add()
1685 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_add()
1693 if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { in sja1105pqrs_fdb_add()
1710 * This is slightly inefficient because the strategy is knock-knock at in sja1105pqrs_fdb_add()
1720 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); in sja1105pqrs_fdb_add()
1721 return -EINVAL; in sja1105pqrs_fdb_add()
1750 dev_err(ds->dev, in sja1105pqrs_fdb_add()
1770 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_del()
1776 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_del()
1807 struct sja1105_private *priv = ds->priv; in sja1105_fdb_add()
1819 return -EOPNOTSUPP; in sja1105_fdb_add()
1823 mutex_lock(&priv->fdb_lock); in sja1105_fdb_add()
1824 rc = priv->info->fdb_add_cmd(ds, port, addr, vid); in sja1105_fdb_add()
1825 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_add()
1834 struct sja1105_private *priv = ds->priv; in __sja1105_fdb_del()
1845 return -EOPNOTSUPP; in __sja1105_fdb_del()
1849 return priv->info->fdb_del_cmd(ds, port, addr, vid); in __sja1105_fdb_del()
1856 struct sja1105_private *priv = ds->priv; in sja1105_fdb_del()
1859 mutex_lock(&priv->fdb_lock); in sja1105_fdb_del()
1861 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_del()
1869 struct sja1105_private *priv = ds->priv; in sja1105_fdb_dump()
1870 struct device *dev = ds->dev; in sja1105_fdb_dump()
1881 if (rc == -ENOENT) in sja1105_fdb_dump()
1891 * 1024-sized FDB table needs to be traversed 4 times through in sja1105_fdb_dump()
1918 struct sja1105_private *priv = ds->priv; in sja1105_fast_age()
1928 mutex_lock(&priv->fdb_lock); in sja1105_fast_age()
1938 if (rc == -ENOENT) in sja1105_fast_age()
1941 dev_err(ds->dev, "Failed to read FDB: %pe\n", in sja1105_fast_age()
1957 dev_err(ds->dev, in sja1105_fast_age()
1964 mutex_unlock(&priv->fdb_lock); in sja1105_fast_age()
1971 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_add()
1978 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_del()
1990 struct dsa_switch *ds = priv->ds; in sja1105_manage_flood_domains()
1993 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_manage_flood_domains()
1995 for (from = 0; from < ds->num_ports; from++) { in sja1105_manage_flood_domains()
1998 for (to = 0; to < priv->ds->num_ports; to++) { in sja1105_manage_flood_domains()
2002 if (priv->ucast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2004 if (priv->bcast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2029 struct sja1105_private *priv = ds->priv; in sja1105_bridge_member()
2032 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_bridge_member()
2034 for (i = 0; i < ds->num_ports; i++) { in sja1105_bridge_member()
2078 struct sja1105_private *priv = ds->priv; in sja1105_bridge_stp_state_set()
2081 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_bridge_stp_state_set()
2103 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2108 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2111 dev_err(ds->dev, "invalid STP state: %d\n", state); in sja1105_bridge_stp_state_set()
2148 #define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
2155 if (priv->info->fixed_cbs_mapping) { in sja1105_find_cbs_shaper()
2157 if (i >= 0 && i < priv->info->num_cbs_shapers) in sja1105_find_cbs_shaper()
2160 return -1; in sja1105_find_cbs_shaper()
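	/* On switches with a fixed CBS mapping, the shaper index follows
	 * directly from SJA1110_FIXED_CBS() above: e.g. queue 5 of port 3
	 * uses shaper (3 - 1) * 8 + 5 = 21, assuming SJA1105_NUM_TC = 8.
	 */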
2163 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_cbs_shaper()
2164 if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) in sja1105_find_cbs_shaper()
2167 return -1; in sja1105_find_cbs_shaper()
2174 if (priv->info->fixed_cbs_mapping) in sja1105_find_unused_cbs_shaper()
2175 return -1; in sja1105_find_unused_cbs_shaper()
2177 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_unused_cbs_shaper()
2178 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) in sja1105_find_unused_cbs_shaper()
2181 return -1; in sja1105_find_unused_cbs_shaper()
2189 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_delete_cbs_shaper()
2190 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_delete_cbs_shaper()
2192 if (cbs->port == port && cbs->prio == prio) { in sja1105_delete_cbs_shaper()
2205 struct sja1105_private *priv = ds->priv; in sja1105_setup_tc_cbs()
2210 if (!offload->enable) in sja1105_setup_tc_cbs()
2211 return sja1105_delete_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2214 index = sja1105_find_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2216 /* That isn't the case - see if we can allocate a new one */ in sja1105_setup_tc_cbs()
2219 return -ENOSPC; in sja1105_setup_tc_cbs()
2222 cbs = &priv->cbs[index]; in sja1105_setup_tc_cbs()
2223 cbs->port = port; in sja1105_setup_tc_cbs()
2224 cbs->prio = offload->queue; in sja1105_setup_tc_cbs()
2228 cbs->credit_hi = offload->hicredit; in sja1105_setup_tc_cbs()
2229 cbs->credit_lo = abs(offload->locredit); in sja1105_setup_tc_cbs()
2231 * link speed. Since the given offload->sendslope is good only for the in sja1105_setup_tc_cbs()
2234 * but deduce the port transmit rate from idleslope - sendslope. in sja1105_setup_tc_cbs()
2236 port_transmit_rate_kbps = offload->idleslope - offload->sendslope; in sja1105_setup_tc_cbs()
2237 cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, in sja1105_setup_tc_cbs()
2239 cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), in sja1105_setup_tc_cbs()
2241 /* Convert the negative values from 64-bit 2's complement in sja1105_setup_tc_cbs()
2242 * to 32-bit 2's complement (for the case of 0x80000000 whose in sja1105_setup_tc_cbs()
2245 cbs->credit_lo &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
2246 cbs->send_slope &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
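	/* Worked example with illustrative numbers: idleslope = 20000 kbit/s
	 * and sendslope = -80000 kbit/s imply a 100000 kbit/s (100 Mbit/s)
	 * port, so with BYTES_PER_KBIT = 1000 / 8 = 125:
	 *   idle_slope = 20000 * 125 / 100000 = 25
	 *   send_slope = 80000 * 125 / 100000 = 100
	 * and the two slopes always add up to BYTES_PER_KBIT.
	 */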
2259 if (!priv->cbs) in sja1105_reload_cbs()
2262 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_reload_cbs()
2263 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_reload_cbs()
2265 if (!cbs->idle_slope && !cbs->send_slope) in sja1105_reload_cbs()
2280 [SJA1105_SCHEDULING] = "Time-aware scheduling",
2281 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
2299 struct dsa_switch *ds = priv->ds; in sja1105_static_config_reload()
2305 mutex_lock(&priv->fdb_lock); in sja1105_static_config_reload()
2306 mutex_lock(&priv->mgmt_lock); in sja1105_static_config_reload()
2308 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_static_config_reload()
2311 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the in sja1105_static_config_reload()
2315 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2318 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_static_config_reload()
2320 if (priv->xpcs[i]) in sja1105_static_config_reload()
2321 bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, in sja1105_static_config_reload()
2326 mutex_lock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2330 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2337 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2343 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2351 /* Mid point, corresponds to pre-reset PTPCLKVAL */ in sja1105_static_config_reload()
2352 t12 = t1 + (t2 - t1) / 2; in sja1105_static_config_reload()
2353 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ in sja1105_static_config_reload()
2354 t34 = t3 + (t4 - t3) / 2; in sja1105_static_config_reload()
2356 now += (t34 - t12); in sja1105_static_config_reload()
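	/* i.e. advance the saved PTP time by the interval, measured via the
	 * bracketing timestamps t1..t4, between the pre-reset read and the
	 * post-reset restart of PTPCLKVAL, so that restoring "now" afterwards
	 * loses no time across the switch reset.
	 */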
2360 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2362 dev_info(priv->ds->dev, in sja1105_static_config_reload()
2370 if (priv->info->clocking_setup) { in sja1105_static_config_reload()
2371 rc = priv->info->clocking_setup(priv); in sja1105_static_config_reload()
2376 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2377 struct dw_xpcs *xpcs = priv->xpcs[i]; in sja1105_static_config_reload()
2392 rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode); in sja1105_static_config_reload()
2399 if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_static_config_reload()
2408 xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i], in sja1105_static_config_reload()
2417 mutex_unlock(&priv->mgmt_lock); in sja1105_static_config_reload()
2418 mutex_unlock(&priv->fdb_lock); in sja1105_static_config_reload()
2427 struct sja1105_private *priv = ds->priv; in sja1105_get_tag_protocol()
2429 return priv->info->tag_proto; in sja1105_get_tag_protocol()
2440 struct sja1105_private *priv = ds->priv; in sja1105_vlan_filtering()
2446 list_for_each_entry(rule, &priv->flow_block.rules, list) { in sja1105_vlan_filtering()
2447 if (rule->type == SJA1105_RULE_VL) { in sja1105_vlan_filtering()
2450 return -EBUSY; in sja1105_vlan_filtering()
2464 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_vlan_filtering()
2465 general_params = table->entries; in sja1105_vlan_filtering()
2466 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ in sja1105_vlan_filtering()
2467 general_params->tpid = tpid; in sja1105_vlan_filtering()
2468 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ in sja1105_vlan_filtering()
2469 general_params->tpid2 = tpid2; in sja1105_vlan_filtering()
2471 for (port = 0; port < ds->num_ports; port++) { in sja1105_vlan_filtering()
2494 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_add()
2498 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_vlan_add()
2501 match = table->entry_count - 1; in sja1105_vlan_add()
2505 vlan = table->entries; in sja1105_vlan_add()
2532 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_del()
2540 vlan = table->entries; in sja1105_vlan_del()
2571 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_add()
2572 u16 flags = vlan->flags; in sja1105_bridge_vlan_add()
2577 if (vid_is_dsa_8021q(vlan->vid)) { in sja1105_bridge_vlan_add()
2579 "Range 3072-4095 reserved for dsa_8021q operation"); in sja1105_bridge_vlan_add()
2580 return -EBUSY; in sja1105_bridge_vlan_add()
2583 /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ in sja1105_bridge_vlan_add()
2587 rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); in sja1105_bridge_vlan_add()
2591 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) in sja1105_bridge_vlan_add()
2592 priv->bridge_pvid[port] = vlan->vid; in sja1105_bridge_vlan_add()
2600 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_del()
2603 rc = sja1105_vlan_del(priv, port, vlan->vid); in sja1105_bridge_vlan_del()
2616 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_add()
2631 priv->tag_8021q_pvid[port] = vid; in sja1105_dsa_8021q_vlan_add()
2638 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_del()
2646 struct netlink_ext_ack *extack = info->info.extack; in sja1105_prechangeupper()
2647 struct net_device *upper = info->upper_dev; in sja1105_prechangeupper()
2648 struct dsa_switch_tree *dst = ds->dst; in sja1105_prechangeupper()
2653 return -EBUSY; in sja1105_prechangeupper()
2657 list_for_each_entry(dp, &dst->ports, list) { in sja1105_prechangeupper()
2662 "Only one VLAN-aware bridge is supported"); in sja1105_prechangeupper()
2663 return -EBUSY; in sja1105_prechangeupper()
2675 struct sja1105_private *priv = ds->priv; in sja1105_mgmt_xmit()
2682 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); in sja1105_mgmt_xmit()
2696 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); in sja1105_mgmt_xmit()
2703 dev_err_ratelimited(priv->ds->dev, in sja1105_mgmt_xmit()
2713 } while (mgmt_route.enfport && --timeout); in sja1105_mgmt_xmit()
2716 /* Clean up the management route so that a follow-up in sja1105_mgmt_xmit()
2718 * This is only hardware supported on P/Q/R/S - on E/T it is in sja1105_mgmt_xmit()
2719 * a no-op and we are silently discarding the -EOPNOTSUPP. in sja1105_mgmt_xmit()
2723 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); in sja1105_mgmt_xmit()
2739 struct sk_buff *clone, *skb = xmit_work->skb; in sja1105_port_deferred_xmit()
2740 struct dsa_switch *ds = xmit_work->dp->ds; in sja1105_port_deferred_xmit()
2741 struct sja1105_private *priv = ds->priv; in sja1105_port_deferred_xmit()
2742 int port = xmit_work->dp->index; in sja1105_port_deferred_xmit()
2744 clone = SJA1105_SKB_CB(skb)->clone; in sja1105_port_deferred_xmit()
2746 mutex_lock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2754 mutex_unlock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2762 struct sja1105_private *priv = ds->priv; in sja1105_connect_tag_protocol()
2765 if (proto != priv->info->tag_proto) in sja1105_connect_tag_protocol()
2766 return -EPROTONOSUPPORT; in sja1105_connect_tag_protocol()
2769 tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; in sja1105_connect_tag_protocol()
2770 tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; in sja1105_connect_tag_protocol()
2782 struct sja1105_private *priv = ds->priv; in sja1105_set_ageing_time()
2786 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_set_ageing_time()
2787 l2_lookup_params = table->entries; in sja1105_set_ageing_time()
2791 if (l2_lookup_params->maxage == maxage) in sja1105_set_ageing_time()
2794 l2_lookup_params->maxage = maxage; in sja1105_set_ageing_time()
2802 struct sja1105_private *priv = ds->priv; in sja1105_change_mtu()
2809 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_change_mtu()
2821 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; in sja1105_get_max_mtu()
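	/* i.e. an upper frame length of 2043 bytes, minus VLAN_ETH_HLEN (18)
	 * and ETH_FCS_LEN (4), giving a 2021-byte maximum MTU.
	 */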
2834 return -EOPNOTSUPP; in sja1105_port_setup_tc()
2849 struct dsa_switch *ds = priv->ds; in sja1105_mirror_apply()
2855 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_mirror_apply()
2856 general_params = table->entries; in sja1105_mirror_apply()
2858 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_mirror_apply()
2860 already_enabled = (general_params->mirr_port != ds->num_ports); in sja1105_mirror_apply()
2861 if (already_enabled && enabled && general_params->mirr_port != to) { in sja1105_mirror_apply()
2862 dev_err(priv->ds->dev, in sja1105_mirror_apply()
2864 general_params->mirr_port); in sja1105_mirror_apply()
2865 return -EBUSY; in sja1105_mirror_apply()
2874 for (port = 0; port < ds->num_ports; port++) { in sja1105_mirror_apply()
2882 new_mirr_port = ds->num_ports; in sja1105_mirror_apply()
2884 if (new_mirr_port != general_params->mirr_port) { in sja1105_mirror_apply()
2885 general_params->mirr_port = new_mirr_port; in sja1105_mirror_apply()
2906 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_add()
2913 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_del()
2914 mirror->ingress, false); in sja1105_mirror_del()
2921 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_add()
2923 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_add()
2929 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, in sja1105_port_policer_add()
2931 policing[port].smax = policer->burst; in sja1105_port_policer_add()
2939 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_del()
2941 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_del()
2954 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_port_set_learning()
2967 priv->ucast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2969 priv->ucast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2974 priv->bcast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2976 priv->bcast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2990 mutex_lock(&priv->fdb_lock); in sja1105_port_mcast_flood()
2992 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_port_mcast_flood()
2993 l2_lookup = table->entries; in sja1105_port_mcast_flood()
2995 for (match = 0; match < table->entry_count; match++) in sja1105_port_mcast_flood()
3000 if (match == table->entry_count) { in sja1105_port_mcast_flood()
3003 rc = -ENOSPC; in sja1105_port_mcast_flood()
3016 mutex_unlock(&priv->fdb_lock); in sja1105_port_mcast_flood()
3025 struct sja1105_private *priv = ds->priv; in sja1105_port_pre_bridge_flags()
3029 return -EINVAL; in sja1105_port_pre_bridge_flags()
3032 !priv->info->can_limit_mcast_flood) { in sja1105_port_pre_bridge_flags()
3039 return -EINVAL; in sja1105_port_pre_bridge_flags()
3050 struct sja1105_private *priv = ds->priv; in sja1105_port_bridge_flags()
3071 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) { in sja1105_port_bridge_flags()
3081 /* The programming model for the SJA1105 switch is "all-at-once" via static
3095 struct sja1105_private *priv = ds->priv; in sja1105_setup()
3098 if (priv->info->disable_microcontroller) { in sja1105_setup()
3099 rc = priv->info->disable_microcontroller(priv); in sja1105_setup()
3101 dev_err(ds->dev, in sja1105_setup()
3111 dev_err(ds->dev, "Failed to load static config: %d\n", rc); in sja1105_setup()
3116 if (priv->info->clocking_setup) { in sja1105_setup()
3117 rc = priv->info->clocking_setup(priv); in sja1105_setup()
3119 dev_err(ds->dev, in sja1105_setup()
3131 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); in sja1105_setup()
3137 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", in sja1105_setup()
3160 ds->vlan_filtering_is_global = true; in sja1105_setup()
3161 ds->untag_bridge_pvid = true; in sja1105_setup()
3162 ds->fdb_isolation = true; in sja1105_setup()
3163 ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; in sja1105_setup()
3166 ds->num_tx_queues = SJA1105_NUM_TC; in sja1105_setup()
3168 ds->mtu_enforcement_ingress = true; in sja1105_setup()
3169 ds->assisted_learning_on_cpu_port = true; in sja1105_setup()
3183 sja1105_static_config_free(&priv->static_config); in sja1105_setup()
3190 struct sja1105_private *priv = ds->priv; in sja1105_teardown()
3201 sja1105_static_config_free(&priv->static_config); in sja1105_teardown()
3260 const struct sja1105_regs *regs = priv->info->regs; in sja1105_check_device_id()
3262 struct device *dev = &priv->spidev->dev; in sja1105_check_device_id()
3268 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, in sja1105_check_device_id()
3273 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, in sja1105_check_device_id()
3280 for (match = sja1105_dt_ids; match->compatible[0]; match++) { in sja1105_check_device_id()
3281 const struct sja1105_info *info = match->data; in sja1105_check_device_id()
3284 if (info->device_id != device_id || info->part_no != part_no) in sja1105_check_device_id()
3288 if (priv->info->device_id != device_id || in sja1105_check_device_id()
3289 priv->info->part_no != part_no) { in sja1105_check_device_id()
3290 dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n", in sja1105_check_device_id()
3291 priv->info->name, info->name); in sja1105_check_device_id()
3293 priv->info = info; in sja1105_check_device_id()
3302 return -ENODEV; in sja1105_check_device_id()
3307 struct device *dev = &spi->dev; in sja1105_probe()
3313 if (!dev->of_node) { in sja1105_probe()
3315 return -EINVAL; in sja1105_probe()
3324 return -ENOMEM; in sja1105_probe()
3329 priv->spidev = spi; in sja1105_probe()
3333 spi->bits_per_word = 8; in sja1105_probe()
3353 /* We need to send at least one 64-bit word of SPI payload per message in sja1105_probe()
3358 return -EINVAL; in sja1105_probe()
3361 priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN; in sja1105_probe()
3362 if (priv->max_xfer_len > max_xfer) in sja1105_probe()
3363 priv->max_xfer_len = max_xfer; in sja1105_probe()
3364 if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER) in sja1105_probe()
3365 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER; in sja1105_probe()
3367 priv->info = of_device_get_match_data(dev); in sja1105_probe()
3376 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); in sja1105_probe()
3380 return -ENOMEM; in sja1105_probe()
3382 ds->dev = dev; in sja1105_probe()
3383 ds->num_ports = priv->info->num_ports; in sja1105_probe()
3384 ds->ops = &sja1105_switch_ops; in sja1105_probe()
3385 ds->phylink_mac_ops = &sja1105_phylink_mac_ops; in sja1105_probe()
3386 ds->priv = priv; in sja1105_probe()
3387 priv->ds = ds; in sja1105_probe()
3389 mutex_init(&priv->ptp_data.lock); in sja1105_probe()
3390 mutex_init(&priv->dynamic_config_lock); in sja1105_probe()
3391 mutex_init(&priv->mgmt_lock); in sja1105_probe()
3392 mutex_init(&priv->fdb_lock); in sja1105_probe()
3393 spin_lock_init(&priv->ts_id_lock); in sja1105_probe()
3397 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); in sja1105_probe()
3402 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, in sja1105_probe()
3405 if (!priv->cbs) in sja1105_probe()
3406 return -ENOMEM; in sja1105_probe()
3409 return dsa_register_switch(priv->ds); in sja1105_probe()
3419 dsa_unregister_switch(priv->ds); in sja1105_remove()
3429 dsa_switch_shutdown(priv->ds); in sja1105_shutdown()
3478 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");