Matching lines from drivers/net/dsa/sja1105/sja1105_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
76 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_is_vlan_configured()
77 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; in sja1105_is_vlan_configured()
84 return -1; in sja1105_is_vlan_configured()
89 struct sja1105_private *priv = ds->priv; in sja1105_drop_untagged()
92 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_drop_untagged()
107 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_pvid_apply()
122 struct sja1105_private *priv = ds->priv; in sja1105_commit_pvid()
129 pvid = priv->bridge_pvid[port]; in sja1105_commit_pvid()
131 pvid = priv->tag_8021q_pvid[port]; in sja1105_commit_pvid()
138 * VLAN-aware bridge. When the tag_8021q pvid is used, we are in sja1105_commit_pvid()
139 * deliberately removing the RX VLAN from the port's VMEMB_PORT list, in sja1105_commit_pvid()
144 if (pvid == priv->bridge_pvid[port]) { in sja1105_commit_pvid()
145 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; in sja1105_commit_pvid()
163 * Every queue i holds top[i] - base[i] frames. in sja1105_init_mac_settings()
164 * Sum of top[i] - base[i] is 511 (max hardware limit). in sja1105_init_mac_settings()
174 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO], in sja1105_init_mac_settings()
175 /* No static correction for 1-step 1588 events */ in sja1105_init_mac_settings()
187 /* Don't drop double-tagged traffic */ in sja1105_init_mac_settings()
193 /* Disable learning and I/O on user ports by default - in sja1105_init_mac_settings()
201 struct dsa_switch *ds = priv->ds; in sja1105_init_mac_settings()
205 table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; in sja1105_init_mac_settings()
208 if (table->entry_count) { in sja1105_init_mac_settings()
209 kfree(table->entries); in sja1105_init_mac_settings()
210 table->entry_count = 0; in sja1105_init_mac_settings()
213 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mac_settings()
214 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mac_settings()
215 if (!table->entries) in sja1105_init_mac_settings()
216 return -ENOMEM; in sja1105_init_mac_settings()
218 table->entry_count = table->ops->max_entry_count; in sja1105_init_mac_settings()
220 mac = table->entries; in sja1105_init_mac_settings()
222 list_for_each_entry(dp, &ds->dst->ports, list) { in sja1105_init_mac_settings()
223 if (dp->ds != ds) in sja1105_init_mac_settings()
226 mac[dp->index] = default_mac; in sja1105_init_mac_settings()
229 * enabled for the DSA ports. CPU ports use software-assisted in sja1105_init_mac_settings()
232 * CPU ports in a cross-chip topology if multiple CPU ports in sja1105_init_mac_settings()
236 dp->learning = true; in sja1105_init_mac_settings()
242 mac[dp->index].drpuntag = true; in sja1105_init_mac_settings()
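The comment above default_mac notes that each egress queue i holds top[i] - base[i] frame buffers and that the sum over the queues is capped at 511. The standalone sketch below carves that budget into eight equal slices purely to illustrate the constraint; it is not the driver's actual default partition.

#include <stdio.h>

#define SJA1105_NUM_TC     8
#define SJA1105_MAX_FRAMES 511

int main(void)
{
	int base[SJA1105_NUM_TC], top[SJA1105_NUM_TC];
	int per_tc = SJA1105_MAX_FRAMES / SJA1105_NUM_TC; /* 63 */
	int i, sum = 0;

	for (i = 0; i < SJA1105_NUM_TC; i++) {
		base[i] = i * per_tc;
		top[i] = base[i] + per_tc; /* queue i holds top[i] - base[i] frames */
		sum += top[i] - base[i];
	}
	printf("reserved %d of %d frame buffers\n", sum, SJA1105_MAX_FRAMES); /* 504 of 511 */
	return 0;
}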
250 struct device *dev = &priv->spidev->dev; in sja1105_init_mii_settings()
252 struct dsa_switch *ds = priv->ds; in sja1105_init_mii_settings()
256 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; in sja1105_init_mii_settings()
259 if (table->entry_count) { in sja1105_init_mii_settings()
260 kfree(table->entries); in sja1105_init_mii_settings()
261 table->entry_count = 0; in sja1105_init_mii_settings()
264 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_mii_settings()
265 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_mii_settings()
266 if (!table->entries) in sja1105_init_mii_settings()
267 return -ENOMEM; in sja1105_init_mii_settings()
270 table->entry_count = table->ops->max_entry_count; in sja1105_init_mii_settings()
272 mii = table->entries; in sja1105_init_mii_settings()
274 for (i = 0; i < ds->num_ports; i++) { in sja1105_init_mii_settings()
277 if (dsa_is_unused_port(priv->ds, i)) in sja1105_init_mii_settings()
280 switch (priv->phy_mode[i]) { in sja1105_init_mii_settings()
282 if (priv->info->internal_phy[i] == SJA1105_NO_PHY) in sja1105_init_mii_settings()
285 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
286 if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX) in sja1105_init_mii_settings()
287 mii->special[i] = true; in sja1105_init_mii_settings()
294 if (!priv->info->supports_mii[i]) in sja1105_init_mii_settings()
297 mii->xmii_mode[i] = XMII_MODE_MII; in sja1105_init_mii_settings()
303 if (!priv->info->supports_rmii[i]) in sja1105_init_mii_settings()
306 mii->xmii_mode[i] = XMII_MODE_RMII; in sja1105_init_mii_settings()
312 if (!priv->info->supports_rgmii[i]) in sja1105_init_mii_settings()
315 mii->xmii_mode[i] = XMII_MODE_RGMII; in sja1105_init_mii_settings()
318 if (!priv->info->supports_sgmii[i]) in sja1105_init_mii_settings()
321 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
322 mii->special[i] = true; in sja1105_init_mii_settings()
325 if (!priv->info->supports_2500basex[i]) in sja1105_init_mii_settings()
328 mii->xmii_mode[i] = XMII_MODE_SGMII; in sja1105_init_mii_settings()
329 mii->special[i] = true; in sja1105_init_mii_settings()
334 phy_modes(priv->phy_mode[i]), i); in sja1105_init_mii_settings()
335 return -EINVAL; in sja1105_init_mii_settings()
338 mii->phy_mac[i] = role; in sja1105_init_mii_settings()
349 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_init_static_fdb()
352 * entries, except for a special entry at the end which is a catch-all in sja1105_init_static_fdb()
355 if (table->entry_count) { in sja1105_init_static_fdb()
356 kfree(table->entries); in sja1105_init_static_fdb()
357 table->entry_count = 0; in sja1105_init_static_fdb()
360 if (!priv->info->can_limit_mcast_flood) in sja1105_init_static_fdb()
363 table->entries = kcalloc(1, table->ops->unpacked_entry_size, in sja1105_init_static_fdb()
365 if (!table->entries) in sja1105_init_static_fdb()
366 return -ENOMEM; in sja1105_init_static_fdb()
368 table->entry_count = 1; in sja1105_init_static_fdb()
369 l2_lookup = table->entries; in sja1105_init_static_fdb()
375 l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1; in sja1105_init_static_fdb()
378 for (port = 0; port < priv->ds->num_ports; port++) in sja1105_init_static_fdb()
379 if (!dsa_is_unused_port(priv->ds, port)) in sja1105_init_static_fdb()
398 /* Don't discard management traffic based on ENFPORT - in sja1105_init_l2_lookup_params()
415 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_lookup_params()
420 for (port = 0; port < ds->num_ports; port++) in sja1105_init_l2_lookup_params()
426 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_lookup_params()
433 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_init_l2_lookup_params()
435 if (table->entry_count) { in sja1105_init_l2_lookup_params()
436 kfree(table->entries); in sja1105_init_l2_lookup_params()
437 table->entry_count = 0; in sja1105_init_l2_lookup_params()
440 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_lookup_params()
441 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_lookup_params()
442 if (!table->entries) in sja1105_init_l2_lookup_params()
443 return -ENOMEM; in sja1105_init_l2_lookup_params()
445 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_lookup_params()
448 ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = in sja1105_init_l2_lookup_params()
456 * All DT-defined ports are members of this VLAN, and there are no
473 struct dsa_switch *ds = priv->ds; in sja1105_init_static_vlan()
476 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_init_static_vlan()
478 if (table->entry_count) { in sja1105_init_static_vlan()
479 kfree(table->entries); in sja1105_init_static_vlan()
480 table->entry_count = 0; in sja1105_init_static_vlan()
483 table->entries = kzalloc(table->ops->unpacked_entry_size, in sja1105_init_static_vlan()
485 if (!table->entries) in sja1105_init_static_vlan()
486 return -ENOMEM; in sja1105_init_static_vlan()
488 table->entry_count = 1; in sja1105_init_static_vlan()
490 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_static_vlan()
499 priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
500 priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; in sja1105_init_static_vlan()
504 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; in sja1105_init_static_vlan()
511 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_forwarding()
518 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; in sja1105_init_l2_forwarding()
520 if (table->entry_count) { in sja1105_init_l2_forwarding()
521 kfree(table->entries); in sja1105_init_l2_forwarding()
522 table->entry_count = 0; in sja1105_init_l2_forwarding()
525 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding()
526 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding()
527 if (!table->entries) in sja1105_init_l2_forwarding()
528 return -ENOMEM; in sja1105_init_l2_forwarding()
530 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding()
532 l2fwd = table->entries; in sja1105_init_l2_forwarding()
538 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
547 * only to the always-on domain (CPU port and DSA links) in sja1105_init_l2_forwarding()
549 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
553 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
566 * always-on domain). These can send packets to any enabled port except in sja1105_init_l2_forwarding()
569 for (from = 0; from < ds->num_ports; from++) { in sja1105_init_l2_forwarding()
573 for (to = 0; to < ds->num_ports; to++) { in sja1105_init_l2_forwarding()
591 * cutting RX from DSA links towards our CPU port, if the remote switch in sja1105_init_l2_forwarding()
595 dst = ds->dst; in sja1105_init_l2_forwarding()
597 list_for_each_entry(dl, &dst->rtable, list) { in sja1105_init_l2_forwarding()
598 if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) in sja1105_init_l2_forwarding()
601 from = dl->dp->index; in sja1105_init_l2_forwarding()
604 dev_warn(ds->dev, in sja1105_init_l2_forwarding()
605 "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n", in sja1105_init_l2_forwarding()
617 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
621 priv->ucast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
622 priv->bcast_egress_floods |= BIT(port); in sja1105_init_l2_forwarding()
626 * Create a one-to-one mapping. in sja1105_init_l2_forwarding()
629 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_forwarding()
633 l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; in sja1105_init_l2_forwarding()
636 l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; in sja1105_init_l2_forwarding()
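The comments in sja1105_init_l2_forwarding() describe the forwarding, broadcast and flood domains as per-port bitmasks: standalone ports reach only the always-on domain (CPU port and DSA links), while CPU and DSA ports reach every enabled port except themselves. A minimal standalone sketch of that bitmask bookkeeping; the structure and helper names below are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>

#define BIT(x) (1ULL << (x))

/* Hypothetical mirror of an L2 forwarding entry: one bit per egress port */
struct l2_forwarding_entry {
	uint64_t reach_port; /* ports this ingress port may forward to */
	uint64_t bc_domain;  /* ports that receive its broadcasts */
	uint64_t fl_domain;  /* ports that receive its unknown-unicast/multicast floods */
};

/* Allow or deny traffic from port "from" towards port "to" */
void allow_traffic(struct l2_forwarding_entry *l2fwd, int from, int to,
		   bool allow)
{
	if (allow)
		l2fwd[from].reach_port |= BIT(to);
	else
		l2fwd[from].reach_port &= ~BIT(to);
}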
645 struct dsa_switch *ds = priv->ds; in sja1110_init_pcp_remapping()
649 table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING]; in sja1110_init_pcp_remapping()
652 if (!table->ops->max_entry_count) in sja1110_init_pcp_remapping()
655 if (table->entry_count) { in sja1110_init_pcp_remapping()
656 kfree(table->entries); in sja1110_init_pcp_remapping()
657 table->entry_count = 0; in sja1110_init_pcp_remapping()
660 table->entries = kcalloc(table->ops->max_entry_count, in sja1110_init_pcp_remapping()
661 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1110_init_pcp_remapping()
662 if (!table->entries) in sja1110_init_pcp_remapping()
663 return -ENOMEM; in sja1110_init_pcp_remapping()
665 table->entry_count = table->ops->max_entry_count; in sja1110_init_pcp_remapping()
667 pcp_remap = table->entries; in sja1110_init_pcp_remapping()
670 for (port = 0; port < ds->num_ports; port++) { in sja1110_init_pcp_remapping()
686 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_init_l2_forwarding_params()
688 if (table->entry_count) { in sja1105_init_l2_forwarding_params()
689 kfree(table->entries); in sja1105_init_l2_forwarding_params()
690 table->entry_count = 0; in sja1105_init_l2_forwarding_params()
693 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_forwarding_params()
694 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_forwarding_params()
695 if (!table->entries) in sja1105_init_l2_forwarding_params()
696 return -ENOMEM; in sja1105_init_l2_forwarding_params()
698 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_forwarding_params()
701 l2fwd_params = table->entries; in sja1105_init_l2_forwarding_params()
704 l2fwd_params->max_dynp = 0; in sja1105_init_l2_forwarding_params()
706 l2fwd_params->part_spc[0] = priv->info->max_frame_mem; in sja1105_init_l2_forwarding_params()
717 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
718 l2_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
719 l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
721 /* If we have any critical-traffic virtual links, we need to reserve in sja1105_frame_memory_partitioning()
724 * remaining for best-effort traffic. TODO: figure out a more flexible in sja1105_frame_memory_partitioning()
727 if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count) in sja1105_frame_memory_partitioning()
730 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS]; in sja1105_frame_memory_partitioning()
731 vl_fwd_params = table->entries; in sja1105_frame_memory_partitioning()
733 l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
734 vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY; in sja1105_frame_memory_partitioning()
740 * -----+----------------+---------------+---------------+---------------
742 * 1 |0, [5:10], retag| [1:2] | [3:4] | -
743 * 2 | 0, [5:10] | [1:3], retag | 4 | -
744 * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
745 * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
746 * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
747 * 14 | 0, [5:10] | [1:4], retag | - | -
748 * 15 | [5:10] | [0:4], retag | - | -
759 if (priv->info->device_id != SJA1110_DEVICE_ID) in sja1110_select_tdmaconfigidx()
762 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1110_select_tdmaconfigidx()
763 general_params = table->entries; in sja1110_select_tdmaconfigidx()
768 port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL; in sja1110_select_tdmaconfigidx()
769 port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
770 port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX; in sja1110_select_tdmaconfigidx()
788 general_params->tdmaconfigidx = tdmaconfigidx; in sja1110_select_tdmaconfigidx()
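The missing header row of the table above presumably labeled the columns as TDMACONFIGIDX followed by the ports running at 100 Mbps, 1 Gbps and 2.5 Gbps (the last column, all "-", being unused ports), with "retag" marking the retagging port. Below is a standalone sketch of a selection consistent with those rows and with the three flags computed in sja1110_select_tdmaconfigidx(); the values the driver actually programs sit on lines elided from this listing, so treat the mapping as illustrative.

#include <stdbool.h>

/* Pick a TDMACONFIGIDX consistent with the table rows above (illustrative) */
int pick_tdmaconfigidx(bool port_1_is_base_tx, bool port_3_is_2500,
		       bool port_4_is_2500)
{
	if (port_1_is_base_tx)
		return 5;  /* row 5: ports 0, 1, [5:10] at 100 Mbps, [3:4] at 2.5 Gbps */
	if (port_3_is_2500 && port_4_is_2500)
		return 1;  /* row 1: ports 3, 4 at 2.5 Gbps */
	if (port_3_is_2500)
		return 3;  /* row 3: only port 3 at 2.5 Gbps */
	if (port_4_is_2500)
		return 2;  /* row 2: only port 4 at 2.5 Gbps */
	return 14;         /* row 14: no 2.5 Gbps ports */
}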
794 struct dsa_switch *ds = priv->ds; in sja1105_init_topology()
801 general_params->host_port = ds->num_ports; in sja1105_init_topology()
803 /* Link-local traffic received on casc_port will be forwarded in sja1105_init_topology()
805 * info in the destination MAC address, and no RX timestamps will be in sja1105_init_topology()
813 if (!priv->info->multiple_cascade_ports) in sja1105_init_topology()
814 general_params->casc_port = ds->num_ports; in sja1105_init_topology()
816 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_topology()
821 * upstream-facing DSA links in sja1105_init_topology()
824 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
825 general_params->host_port = port; in sja1105_init_topology()
827 dev_err(ds->dev, in sja1105_init_topology()
829 general_params->host_port, port); in sja1105_init_topology()
830 return -EINVAL; in sja1105_init_topology()
834 /* Cascade ports are downstream-facing DSA links */ in sja1105_init_topology()
836 if (priv->info->multiple_cascade_ports) { in sja1105_init_topology()
837 general_params->casc_port |= BIT(port); in sja1105_init_topology()
838 } else if (general_params->casc_port == ds->num_ports) { in sja1105_init_topology()
839 general_params->casc_port = port; in sja1105_init_topology()
841 dev_err(ds->dev, in sja1105_init_topology()
843 general_params->casc_port, port); in sja1105_init_topology()
844 return -EINVAL; in sja1105_init_topology()
849 if (general_params->host_port == ds->num_ports) { in sja1105_init_topology()
850 dev_err(ds->dev, "No host port configured\n"); in sja1105_init_topology()
851 return -EINVAL; in sja1105_init_topology()
862 .switchid = priv->ds->index, in sja1105_init_general_params()
863 /* Priority queue for link-local management frames in sja1105_init_general_params()
864 * (both ingress to and egress from CPU - PTP, STP etc) in sja1105_init_general_params()
876 .mirr_port = priv->ds->num_ports, in sja1105_init_general_params()
881 /* Only update correctionField for 1-step PTP (L2 transport) */ in sja1105_init_general_params()
901 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_init_general_params()
903 if (table->entry_count) { in sja1105_init_general_params()
904 kfree(table->entries); in sja1105_init_general_params()
905 table->entry_count = 0; in sja1105_init_general_params()
908 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_general_params()
909 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_general_params()
910 if (!table->entries) in sja1105_init_general_params()
911 return -ENOMEM; in sja1105_init_general_params()
913 table->entry_count = table->ops->max_entry_count; in sja1105_init_general_params()
915 general_params = table->entries; in sja1105_init_general_params()
930 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; in sja1105_init_avb_params()
933 if (table->entry_count) { in sja1105_init_avb_params()
934 kfree(table->entries); in sja1105_init_avb_params()
935 table->entry_count = 0; in sja1105_init_avb_params()
938 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_avb_params()
939 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_avb_params()
940 if (!table->entries) in sja1105_init_avb_params()
941 return -ENOMEM; in sja1105_init_avb_params()
943 table->entry_count = table->ops->max_entry_count; in sja1105_init_avb_params()
945 avb = table->entries; in sja1105_init_avb_params()
948 avb->destmeta = SJA1105_META_DMAC; in sja1105_init_avb_params()
949 avb->srcmeta = SJA1105_META_SMAC; in sja1105_init_avb_params()
950 /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by in sja1105_init_avb_params()
957 avb->cas_master = false; in sja1105_init_avb_params()
962 /* The L2 policing table is 2-stage. The table is looked up for each frame
 *   [ASCII diagram: Stage 1 holds one entry per {ingress port, traffic class}
 *    plus one broadcast entry per port, each carrying a SHARINDX that points
 *    into Stage 2, the shared pool of policer entries (rate, burst, MTU)]
 998  * In this driver, we shall use policers 0-4 as statically allocated port
1010 struct dsa_switch *ds = priv->ds; in sja1105_init_l2_policing()
1014 table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; in sja1105_init_l2_policing()
1017 if (table->entry_count) { in sja1105_init_l2_policing()
1018 kfree(table->entries); in sja1105_init_l2_policing()
1019 table->entry_count = 0; in sja1105_init_l2_policing()
1022 table->entries = kcalloc(table->ops->max_entry_count, in sja1105_init_l2_policing()
1023 table->ops->unpacked_entry_size, GFP_KERNEL); in sja1105_init_l2_policing()
1024 if (!table->entries) in sja1105_init_l2_policing()
1025 return -ENOMEM; in sja1105_init_l2_policing()
1027 table->entry_count = table->ops->max_entry_count; in sja1105_init_l2_policing()
1029 policing = table->entries; in sja1105_init_l2_policing()
1032 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
1033 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port; in sja1105_init_l2_policing()
1034 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port; in sja1105_init_l2_policing()
1041 if (mcast < table->ops->max_entry_count) in sja1105_init_l2_policing()
1046 for (port = 0; port < ds->num_ports; port++) { in sja1105_init_l2_policing()
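The mcast/bcast values above index stage-1 lookup entries laid out after the per-{port, traffic class} block: first num_ports * SJA1105_NUM_TC per-TC entries, then one broadcast entry per port, then (on switches that can limit multicast flooding) one multicast entry per port; per the comment earlier, their SHARINDX values all collapse onto the statically allocated per-port policers. A standalone worked example of the resulting indices for a 5-port, 8-TC switch; the ordering inside the per-TC block is an assumption.

#include <stdio.h>

#define SJA1105_NUM_TC 8

int main(void)
{
	int num_ports = 5; /* e.g. SJA1105; the SJA1110 has more ports */
	int port;

	for (port = 0; port < num_ports; port++) {
		int tc0   = port * SJA1105_NUM_TC;                   /* assumed per-TC layout */
		int bcast = num_ports * SJA1105_NUM_TC + port;       /* 40..44 */
		int mcast = num_ports * (SJA1105_NUM_TC + 1) + port; /* 45..49, if present */

		printf("port %d: TC entries %d-%d, bcast %d, mcast %d\n",
		       port, tc0, tc0 + SJA1105_NUM_TC - 1, bcast, mcast);
	}
	return 0;
}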
1065 sja1105_static_config_free(&priv->static_config); in sja1105_static_config_load()
1066 rc = sja1105_static_config_init(&priv->static_config, in sja1105_static_config_load()
1067 priv->info->static_ops, in sja1105_static_config_load()
1068 priv->info->device_id); in sja1105_static_config_load()
1112 * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
1117 * Previously we were acting upon the "phy-mode" property when we were
1118 * operating in fixed-link, basically acting as a PHY, but with a reversed
1121 * TX direction. So if anything, RX delays should have been added by the MAC,
1124 * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
1125 * back to the legacy behavior and apply delays on fixed-link ports based on
1126 * the reverse interpretation of the phy-mode. This is a deviation from the
1129 * "{rx,tx}-internal-delay-ps" with a value of 0.
1134 phy_interface_t phy_mode = priv->phy_mode[port]; in sja1105_parse_rgmii_delays()
1135 struct device *dev = &priv->spidev->dev; in sja1105_parse_rgmii_delays()
1136 int rx_delay = -1, tx_delay = -1; in sja1105_parse_rgmii_delays()
1141 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in sja1105_parse_rgmii_delays()
1142 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in sja1105_parse_rgmii_delays()
1144 if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { in sja1105_parse_rgmii_delays()
1146 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in sja1105_parse_rgmii_delays()
1147 "please update device tree to specify \"rx-internal-delay-ps\" and " in sja1105_parse_rgmii_delays()
1148 "\"tx-internal-delay-ps\"", in sja1105_parse_rgmii_delays()
1165 if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { in sja1105_parse_rgmii_delays()
1167 return -EINVAL; in sja1105_parse_rgmii_delays()
1177 return -ERANGE; in sja1105_parse_rgmii_delays()
1180 priv->rgmii_rx_delay_ps[port] = rx_delay; in sja1105_parse_rgmii_delays()
1181 priv->rgmii_tx_delay_ps[port] = tx_delay; in sja1105_parse_rgmii_delays()
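The comment block above describes a legacy fallback for fixed-link ports that lack "{rx,tx}-internal-delay-ps": the RGMII delays are inferred from the reverse reading of "phy-mode". A standalone sketch of what such a fallback can look like; which modes map to which direction and the 2000 ps figure are assumptions for illustration, not taken from the elided lines.

/* Sketch only: derive MAC-side RGMII delays from the phy-mode of a
 * fixed-link port, as the legacy fallback above describes.
 */
enum rgmii_mode { RGMII, RGMII_RXID, RGMII_TXID, RGMII_ID };

void legacy_rgmii_delays(enum rgmii_mode mode, int *rx_ps, int *tx_ps)
{
	*rx_ps = 0;
	*tx_ps = 0;

	if (mode == RGMII_RXID || mode == RGMII_ID)
		*rx_ps = 2000; /* illustrative 2 ns internal delay */
	if (mode == RGMII_TXID || mode == RGMII_ID)
		*tx_ps = 2000;
}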
1189 struct device *dev = &priv->spidev->dev; in sja1105_parse_ports_node()
1201 return -ENODEV; in sja1105_parse_ports_node()
1207 dev_err(dev, "Failed to read phy-mode or " in sja1105_parse_ports_node()
1208 "phy-interface-type property for port %d\n", in sja1105_parse_ports_node()
1210 return -ENODEV; in sja1105_parse_ports_node()
1213 phy_node = of_parse_phandle(child, "phy-handle", 0); in sja1105_parse_ports_node()
1216 dev_err(dev, "phy-handle or fixed-link " in sja1105_parse_ports_node()
1218 return -ENODEV; in sja1105_parse_ports_node()
1220 /* phy-handle is missing, but fixed-link isn't. in sja1105_parse_ports_node()
1223 priv->fixed_link[index] = true; in sja1105_parse_ports_node()
1228 priv->phy_mode[index] = phy_mode; in sja1105_parse_ports_node()
1240 struct device *dev = &priv->spidev->dev; in sja1105_parse_dt()
1241 struct device_node *switch_node = dev->of_node; in sja1105_parse_dt()
1247 ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); in sja1105_parse_dt()
1250 return -ENODEV; in sja1105_parse_dt()
1271 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_set_port_speed()
1276 * the state->interface, but AN has not completed and the in sja1105_set_port_speed()
1279 * ok for power consumption in case AN will never complete - in sja1105_set_port_speed()
1282 speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_set_port_speed()
1285 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS]; in sja1105_set_port_speed()
1288 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS]; in sja1105_set_port_speed()
1291 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_set_port_speed()
1294 speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_set_port_speed()
1297 dev_err(priv->ds->dev, "Invalid speed %iMbps\n", speed_mbps); in sja1105_set_port_speed()
1298 return -EINVAL; in sja1105_set_port_speed()
1306 * we need to configure the PCS only (if even that). in sja1105_set_port_speed()
1308 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) in sja1105_set_port_speed()
1309 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; in sja1105_set_port_speed()
1310 else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_set_port_speed()
1311 speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; in sja1105_set_port_speed()
1324 struct device *dev = priv->ds->dev; in sja1105_set_port_config()
1333 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_set_port_config()
1349 if (!phy_interface_mode_is_rgmii(priv->phy_mode[port])) in sja1105_set_port_config()
1359 struct sja1105_private *priv = dp->ds->priv; in sja1105_mac_select_pcs()
1361 return priv->pcs[dp->index]; in sja1105_mac_select_pcs()
1376 sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true); in sja1105_mac_link_down()
1387 struct sja1105_private *priv = dp->ds->priv; in sja1105_mac_link_up()
1388 int port = dp->index; in sja1105_mac_link_up()
1399 struct sja1105_private *priv = ds->priv; in sja1105_phylink_get_caps()
1403 phy_mode = priv->phy_mode[port]; in sja1105_phylink_get_caps()
1408 * changes between SGMII and 2500base-X. in sja1105_phylink_get_caps()
1410 if (priv->info->supports_sgmii[port]) in sja1105_phylink_get_caps()
1412 config->supported_interfaces); in sja1105_phylink_get_caps()
1414 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1416 config->supported_interfaces); in sja1105_phylink_get_caps()
1422 __set_bit(phy_mode, config->supported_interfaces); in sja1105_phylink_get_caps()
1426 * support half-duplex traffic modes. in sja1105_phylink_get_caps()
1428 config->mac_capabilities = MAC_10FD | MAC_100FD; in sja1105_phylink_get_caps()
1430 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; in sja1105_phylink_get_caps()
1431 if (mii->xmii_mode[port] == XMII_MODE_RGMII || in sja1105_phylink_get_caps()
1432 mii->xmii_mode[port] == XMII_MODE_SGMII) in sja1105_phylink_get_caps()
1433 config->mac_capabilities |= MAC_1000FD; in sja1105_phylink_get_caps()
1435 if (priv->info->supports_2500basex[port]) in sja1105_phylink_get_caps()
1436 config->mac_capabilities |= MAC_2500FD; in sja1105_phylink_get_caps()
1447 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_find_static_fdb_entry()
1448 l2_lookup = table->entries; in sja1105_find_static_fdb_entry()
1450 for (i = 0; i < table->entry_count; i++) in sja1105_find_static_fdb_entry()
1451 if (l2_lookup[i].macaddr == requested->macaddr && in sja1105_find_static_fdb_entry()
1452 l2_lookup[i].vlanid == requested->vlanid && in sja1105_find_static_fdb_entry()
1456 return -1; in sja1105_find_static_fdb_entry()
1473 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_static_fdb_change()
1482 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_static_fdb_change()
1486 match = table->entry_count - 1; in sja1105_static_fdb_change()
1490 l2_lookup = table->entries; in sja1105_static_fdb_change()
1506 l2_lookup[match] = l2_lookup[table->entry_count - 1]; in sja1105_static_fdb_change()
1507 return sja1105_table_resize(table, table->entry_count - 1); in sja1105_static_fdb_change()
1510 /* First-generation switches have a 4-way set associative TCAM that
1550 return -1; in sja1105et_is_fdb_entry_in_bin()
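The truncated comment above describes the E/T FDB as a 4-way set associative TCAM, so an absolute entry index decomposes into a bin (set) and a way within it; sja1105_fdb_dump() further down mentions the 1024-entry table size. A standalone sketch of that index arithmetic, with the constants assumed from those two hints.

#include <stdio.h>

#define FDB_BIN_SIZE  4    /* ways per bin (4-way set associative) */
#define FDB_MAX_COUNT 1024 /* total FDB entries on E/T */

/* Absolute FDB index from (bin, way) */
int fdb_index(int bin, int way)
{
	return bin * FDB_BIN_SIZE + way;
}

int main(void)
{
	int bins = FDB_MAX_COUNT / FDB_BIN_SIZE; /* 256 bins */

	printf("%d bins; bin 37 spans indices %d-%d\n",
	       bins, fdb_index(37, 0), fdb_index(37, FDB_BIN_SIZE - 1));
	return 0;
}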
1557 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_add()
1558 struct device *dev = ds->dev; in sja1105et_fdb_add()
1559 int last_unused = -1; in sja1105et_fdb_add()
1592 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly in sja1105et_fdb_add()
1619 if (rc == -ENOENT) in sja1105et_fdb_add()
1642 struct sja1105_private *priv = ds->priv; in sja1105et_fdb_del()
1677 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_add()
1683 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_add()
1691 if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { in sja1105pqrs_fdb_add()
1708 * This is slightly inefficient because the strategy is knock-knock at in sja1105pqrs_fdb_add()
1718 dev_err(ds->dev, "FDB is full, cannot add entry.\n"); in sja1105pqrs_fdb_add()
1719 return -EINVAL; in sja1105pqrs_fdb_add()
1748 dev_err(ds->dev, in sja1105pqrs_fdb_add()
1768 struct sja1105_private *priv = ds->priv; in sja1105pqrs_fdb_del()
1774 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); in sja1105pqrs_fdb_del()
1805 struct sja1105_private *priv = ds->priv; in sja1105_fdb_add()
1817 return -EOPNOTSUPP; in sja1105_fdb_add()
1821 mutex_lock(&priv->fdb_lock); in sja1105_fdb_add()
1822 rc = priv->info->fdb_add_cmd(ds, port, addr, vid); in sja1105_fdb_add()
1823 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_add()
1832 struct sja1105_private *priv = ds->priv; in __sja1105_fdb_del()
1843 return -EOPNOTSUPP; in __sja1105_fdb_del()
1847 return priv->info->fdb_del_cmd(ds, port, addr, vid); in __sja1105_fdb_del()
1854 struct sja1105_private *priv = ds->priv; in sja1105_fdb_del()
1857 mutex_lock(&priv->fdb_lock); in sja1105_fdb_del()
1859 mutex_unlock(&priv->fdb_lock); in sja1105_fdb_del()
1867 struct sja1105_private *priv = ds->priv; in sja1105_fdb_dump()
1868 struct device *dev = ds->dev; in sja1105_fdb_dump()
1879 if (rc == -ENOENT) in sja1105_fdb_dump()
1889 * 1024-sized FDB table needs to be traversed 4 times through in sja1105_fdb_dump()
1916 struct sja1105_private *priv = ds->priv; in sja1105_fast_age()
1926 mutex_lock(&priv->fdb_lock); in sja1105_fast_age()
1936 if (rc == -ENOENT) in sja1105_fast_age()
1939 dev_err(ds->dev, "Failed to read FDB: %pe\n", in sja1105_fast_age()
1955 dev_err(ds->dev, in sja1105_fast_age()
1962 mutex_unlock(&priv->fdb_lock); in sja1105_fast_age()
1969 return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_add()
1976 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); in sja1105_mdb_del()
1988 struct dsa_switch *ds = priv->ds; in sja1105_manage_flood_domains()
1991 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_manage_flood_domains()
1993 for (from = 0; from < ds->num_ports; from++) { in sja1105_manage_flood_domains()
1996 for (to = 0; to < priv->ds->num_ports; to++) { in sja1105_manage_flood_domains()
2000 if (priv->ucast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2002 if (priv->bcast_egress_floods & BIT(to)) in sja1105_manage_flood_domains()
2027 struct sja1105_private *priv = ds->priv; in sja1105_bridge_member()
2030 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; in sja1105_bridge_member()
2032 for (i = 0; i < ds->num_ports; i++) { in sja1105_bridge_member()
2076 struct sja1105_private *priv = ds->priv; in sja1105_bridge_stp_state_set()
2079 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_bridge_stp_state_set()
2101 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2106 mac[port].dyn_learn = dp->learning; in sja1105_bridge_stp_state_set()
2109 dev_err(ds->dev, "invalid STP state: %d\n", state); in sja1105_bridge_stp_state_set()
2146 #define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
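A worked reading of the macro above, used when priv->info->fixed_cbs_mapping is set (see sja1105_find_cbs_shaper() just below): with SJA1105_NUM_TC = 8, port 3 / queue 5 maps to shaper index (3 - 1) * 8 + 5 = 21, so the shaper index follows directly from the port and priority instead of being searched for in the free pool.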
2153 if (priv->info->fixed_cbs_mapping) { in sja1105_find_cbs_shaper()
2155 if (i >= 0 && i < priv->info->num_cbs_shapers) in sja1105_find_cbs_shaper()
2158 return -1; in sja1105_find_cbs_shaper()
2161 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_cbs_shaper()
2162 if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) in sja1105_find_cbs_shaper()
2165 return -1; in sja1105_find_cbs_shaper()
2172 if (priv->info->fixed_cbs_mapping) in sja1105_find_unused_cbs_shaper()
2173 return -1; in sja1105_find_unused_cbs_shaper()
2175 for (i = 0; i < priv->info->num_cbs_shapers; i++) in sja1105_find_unused_cbs_shaper()
2176 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) in sja1105_find_unused_cbs_shaper()
2179 return -1; in sja1105_find_unused_cbs_shaper()
2187 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_delete_cbs_shaper()
2188 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_delete_cbs_shaper()
2190 if (cbs->port == port && cbs->prio == prio) { in sja1105_delete_cbs_shaper()
2203 struct sja1105_private *priv = ds->priv; in sja1105_setup_tc_cbs()
2208 if (!offload->enable) in sja1105_setup_tc_cbs()
2209 return sja1105_delete_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2212 index = sja1105_find_cbs_shaper(priv, port, offload->queue); in sja1105_setup_tc_cbs()
2214 /* That isn't the case - see if we can allocate a new one */ in sja1105_setup_tc_cbs()
2217 return -ENOSPC; in sja1105_setup_tc_cbs()
2220 cbs = &priv->cbs[index]; in sja1105_setup_tc_cbs()
2221 cbs->port = port; in sja1105_setup_tc_cbs()
2222 cbs->prio = offload->queue; in sja1105_setup_tc_cbs()
2226 cbs->credit_hi = offload->hicredit; in sja1105_setup_tc_cbs()
2227 cbs->credit_lo = abs(offload->locredit); in sja1105_setup_tc_cbs()
2229 * link speed. Since the given offload->sendslope is good only for the in sja1105_setup_tc_cbs()
2232 * but deduce the port transmit rate from idleslope - sendslope. in sja1105_setup_tc_cbs()
2234 port_transmit_rate_kbps = offload->idleslope - offload->sendslope; in sja1105_setup_tc_cbs()
2235 cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, in sja1105_setup_tc_cbs()
2237 cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), in sja1105_setup_tc_cbs()
2239 /* Convert the negative values from 64-bit 2's complement in sja1105_setup_tc_cbs()
2240 * to 32-bit 2's complement (for the case of 0x80000000 whose in sja1105_setup_tc_cbs()
2243 cbs->credit_lo &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
2244 cbs->send_slope &= GENMASK_ULL(31, 0); in sja1105_setup_tc_cbs()
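The conversion above expresses both slopes relative to the port transmit rate, recovered as idleslope - sendslope since the offloaded values are only valid at that rate. A standalone worked example with hypothetical tc-cbs parameters (a 20 Mbit/s reservation on a 100 Mbit/s port):

#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8) /* 125 bytes/s per kbit/s */

int main(void)
{
	/* Hypothetical tc-cbs offload values, in kbit/s */
	long long idleslope = 20000;
	long long sendslope = -80000;
	long long port_rate_kbps = idleslope - sendslope; /* 100000 */

	long long idle = idleslope * BYTES_PER_KBIT / port_rate_kbps;         /* 25 */
	long long send = llabs(sendslope * BYTES_PER_KBIT) / port_rate_kbps;  /* 100 */

	printf("idle_slope=%lld send_slope=%lld (hardware slope units)\n",
	       idle, send);
	return 0;
}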
2257 if (!priv->cbs) in sja1105_reload_cbs()
2260 for (i = 0; i < priv->info->num_cbs_shapers; i++) { in sja1105_reload_cbs()
2261 struct sja1105_cbs_entry *cbs = &priv->cbs[i]; in sja1105_reload_cbs()
2263 if (!cbs->idle_slope && !cbs->send_slope) in sja1105_reload_cbs()
2278 [SJA1105_SCHEDULING] = "Time-aware scheduling",
2279 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
2297 struct dsa_switch *ds = priv->ds; in sja1105_static_config_reload()
2303 mutex_lock(&priv->fdb_lock); in sja1105_static_config_reload()
2304 mutex_lock(&priv->mgmt_lock); in sja1105_static_config_reload()
2306 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_static_config_reload()
2309 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the in sja1105_static_config_reload()
2313 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2315 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; in sja1105_static_config_reload()
2317 if (priv->pcs[i]) in sja1105_static_config_reload()
2318 bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, in sja1105_static_config_reload()
2323 mutex_lock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2327 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2334 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2340 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2348 /* Mid point, corresponds to pre-reset PTPCLKVAL */ in sja1105_static_config_reload()
2349 t12 = t1 + (t2 - t1) / 2; in sja1105_static_config_reload()
2350 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ in sja1105_static_config_reload()
2351 t34 = t3 + (t4 - t3) / 2; in sja1105_static_config_reload()
2353 now += (t34 - t12); in sja1105_static_config_reload()
2357 mutex_unlock(&priv->ptp_data.lock); in sja1105_static_config_reload()
2359 dev_info(priv->ds->dev, in sja1105_static_config_reload()
2367 if (priv->info->clocking_setup) { in sja1105_static_config_reload()
2368 rc = priv->info->clocking_setup(priv); in sja1105_static_config_reload()
2373 for (i = 0; i < ds->num_ports; i++) { in sja1105_static_config_reload()
2374  struct phylink_pcs *pcs = priv->pcs[i]; in sja1105_static_config_reload()
2382 if (!pcs) in sja1105_static_config_reload()
2390 rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i], in sja1105_static_config_reload()
2398 if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX) in sja1105_static_config_reload()
2407 pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i], in sja1105_static_config_reload()
2416 mutex_unlock(&priv->mgmt_lock); in sja1105_static_config_reload()
2417 mutex_unlock(&priv->fdb_lock); in sja1105_static_config_reload()
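In the reload path above, t1/t2 bracket the read of the pre-reset PTP clock and t3/t4 bracket the write of the post-reset value, so each midpoint estimates when its snapshot was actually valid and (t34 - t12) is the time the clock lost across the reset. A standalone worked example with hypothetical nanosecond readings:

#include <stdio.h>

int main(void)
{
	/* Hypothetical monotonic timestamps (ns) taken around the reset */
	long long t1 = 1000000, t2 = 1000800; /* around reading PTPCLKVAL (pre-reset) */
	long long t3 = 4000000, t4 = 4000600; /* around resetting PTPCLKVAL to 0 */
	long long now = 900000000;            /* PTP time sampled at ~t12 */

	long long t12 = t1 + (t2 - t1) / 2;   /* 1000400 */
	long long t34 = t3 + (t4 - t3) / 2;   /* 4000300 */

	now += t34 - t12; /* advance by the 2999900 ns spent in reset */
	printf("restored PTP time: %lld\n", now);
	return 0;
}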
2426 struct sja1105_private *priv = ds->priv; in sja1105_get_tag_protocol()
2428 return priv->info->tag_proto; in sja1105_get_tag_protocol()
2439 struct sja1105_private *priv = ds->priv; in sja1105_vlan_filtering()
2445 list_for_each_entry(rule, &priv->flow_block.rules, list) { in sja1105_vlan_filtering()
2446 if (rule->type == SJA1105_RULE_VL) { in sja1105_vlan_filtering()
2449 return -EBUSY; in sja1105_vlan_filtering()
2463 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_vlan_filtering()
2464 general_params = table->entries; in sja1105_vlan_filtering()
2465 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ in sja1105_vlan_filtering()
2466 general_params->tpid = tpid; in sja1105_vlan_filtering()
2467 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ in sja1105_vlan_filtering()
2468 general_params->tpid2 = tpid2; in sja1105_vlan_filtering()
2470 for (port = 0; port < ds->num_ports; port++) { in sja1105_vlan_filtering()
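tpid/tpid2 above tell the switch which EtherTypes to treat as inner (C-tag) and outer (S-tag) VLAN tags. The selection itself sits on lines elided from this listing; the sketch below shows a plausible choice, stated as an assumption: the standard 802.1Q/802.1ad EtherTypes when VLAN filtering is on, and the private tag_8021q EtherType when it is off so that real VLAN tags pass through as payload.

#include <stdint.h>

#define ETH_P_8021Q   0x8100 /* C-tag */
#define ETH_P_8021AD  0x88A8 /* S-tag */
#define ETH_P_SJA1105 0xDADB /* EtherType used by the sja1105 DSA tagger */

/* Assumed TPID selection for sja1105_vlan_filtering() (not copied from the
 * elided lines).
 */
void pick_tpids(int vlan_filtering, uint16_t *tpid, uint16_t *tpid2)
{
	if (vlan_filtering) {
		*tpid = ETH_P_8021Q;
		*tpid2 = ETH_P_8021AD;
	} else {
		/* Only the DSA tag_8021q VLANs should be recognized */
		*tpid = ETH_P_SJA1105;
		*tpid2 = ETH_P_SJA1105;
	}
}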
2493 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_add()
2497 rc = sja1105_table_resize(table, table->entry_count + 1); in sja1105_vlan_add()
2500 match = table->entry_count - 1; in sja1105_vlan_add()
2504 vlan = table->entries; in sja1105_vlan_add()
2531 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; in sja1105_vlan_del()
2539 vlan = table->entries; in sja1105_vlan_del()
2570 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_add()
2571 u16 flags = vlan->flags; in sja1105_bridge_vlan_add()
2576 if (vid_is_dsa_8021q(vlan->vid)) { in sja1105_bridge_vlan_add()
2578 "Range 3072-4095 reserved for dsa_8021q operation"); in sja1105_bridge_vlan_add()
2579 return -EBUSY; in sja1105_bridge_vlan_add()
2582 /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ in sja1105_bridge_vlan_add()
2586 rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); in sja1105_bridge_vlan_add()
2590 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) in sja1105_bridge_vlan_add()
2591 priv->bridge_pvid[port] = vlan->vid; in sja1105_bridge_vlan_add()
2599 struct sja1105_private *priv = ds->priv; in sja1105_bridge_vlan_del()
2602 rc = sja1105_vlan_del(priv, port, vlan->vid); in sja1105_bridge_vlan_del()
2615 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_add()
2630 priv->tag_8021q_pvid[port] = vid; in sja1105_dsa_8021q_vlan_add()
2637 struct sja1105_private *priv = ds->priv; in sja1105_dsa_8021q_vlan_del()
2645 struct netlink_ext_ack *extack = info->info.extack; in sja1105_prechangeupper()
2646 struct net_device *upper = info->upper_dev; in sja1105_prechangeupper()
2647 struct dsa_switch_tree *dst = ds->dst; in sja1105_prechangeupper()
2652 return -EBUSY; in sja1105_prechangeupper()
2656 list_for_each_entry(dp, &dst->ports, list) { in sja1105_prechangeupper()
2661 "Only one VLAN-aware bridge is supported"); in sja1105_prechangeupper()
2662 return -EBUSY; in sja1105_prechangeupper()
2674 struct sja1105_private *priv = ds->priv; in sja1105_mgmt_xmit()
2681 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); in sja1105_mgmt_xmit()
2695 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); in sja1105_mgmt_xmit()
2702 dev_err_ratelimited(priv->ds->dev, in sja1105_mgmt_xmit()
2712 } while (mgmt_route.enfport && --timeout); in sja1105_mgmt_xmit()
2715 /* Clean up the management route so that a follow-up in sja1105_mgmt_xmit()
2717 * This is only hardware supported on P/Q/R/S - on E/T it is in sja1105_mgmt_xmit()
2718 * a no-op and we are silently discarding the -EOPNOTSUPP. in sja1105_mgmt_xmit()
2722 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); in sja1105_mgmt_xmit()
2738 struct sk_buff *clone, *skb = xmit_work->skb; in sja1105_port_deferred_xmit()
2739 struct dsa_switch *ds = xmit_work->dp->ds; in sja1105_port_deferred_xmit()
2740 struct sja1105_private *priv = ds->priv; in sja1105_port_deferred_xmit()
2741 int port = xmit_work->dp->index; in sja1105_port_deferred_xmit()
2743 clone = SJA1105_SKB_CB(skb)->clone; in sja1105_port_deferred_xmit()
2745 mutex_lock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2753 mutex_unlock(&priv->mgmt_lock); in sja1105_port_deferred_xmit()
2761 struct sja1105_private *priv = ds->priv; in sja1105_connect_tag_protocol()
2764 if (proto != priv->info->tag_proto) in sja1105_connect_tag_protocol()
2765 return -EPROTONOSUPPORT; in sja1105_connect_tag_protocol()
2768 tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; in sja1105_connect_tag_protocol()
2769 tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; in sja1105_connect_tag_protocol()
2781 struct sja1105_private *priv = ds->priv; in sja1105_set_ageing_time()
2785 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; in sja1105_set_ageing_time()
2786 l2_lookup_params = table->entries; in sja1105_set_ageing_time()
2790 if (l2_lookup_params->maxage == maxage) in sja1105_set_ageing_time()
2793 l2_lookup_params->maxage = maxage; in sja1105_set_ageing_time()
2801 struct sja1105_private *priv = ds->priv; in sja1105_change_mtu()
2808 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_change_mtu()
2820 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN; in sja1105_get_max_mtu()
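A worked reading of the line above: the hardware's 2043-byte maximum frame length includes the Ethernet header, one VLAN tag (VLAN_ETH_HLEN = 18) and the FCS (ETH_FCS_LEN = 4), so the largest MTU exposed to the stack is 2043 - 18 - 4 = 2021 bytes.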
2833 return -EOPNOTSUPP; in sja1105_port_setup_tc()
2848 struct dsa_switch *ds = priv->ds; in sja1105_mirror_apply()
2854 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; in sja1105_mirror_apply()
2855 general_params = table->entries; in sja1105_mirror_apply()
2857 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_mirror_apply()
2859 already_enabled = (general_params->mirr_port != ds->num_ports); in sja1105_mirror_apply()
2860 if (already_enabled && enabled && general_params->mirr_port != to) { in sja1105_mirror_apply()
2861 dev_err(priv->ds->dev, in sja1105_mirror_apply()
2863 general_params->mirr_port); in sja1105_mirror_apply()
2864 return -EBUSY; in sja1105_mirror_apply()
2873 for (port = 0; port < ds->num_ports; port++) { in sja1105_mirror_apply()
2881 new_mirr_port = ds->num_ports; in sja1105_mirror_apply()
2883 if (new_mirr_port != general_params->mirr_port) { in sja1105_mirror_apply()
2884 general_params->mirr_port = new_mirr_port; in sja1105_mirror_apply()
2905 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_add()
2912 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, in sja1105_mirror_del()
2913 mirror->ingress, false); in sja1105_mirror_del()
2920 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_add()
2922 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_add()
2928 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, in sja1105_port_policer_add()
2930 policing[port].smax = policer->burst; in sja1105_port_policer_add()
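A worked example of the rate conversion above: a matchall policer of 125,000,000 bytes/s (1 Gbit/s) programs rate = 512 * 125,000,000 / 1,000,000 = 64,000, i.e. one hardware rate unit corresponds to 1,000,000 / 512 ≈ 1953 bytes per second, while the burst (smax) is written in bytes as passed by tc.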
2938 struct sja1105_private *priv = ds->priv; in sja1105_port_policer_del()
2940 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; in sja1105_port_policer_del()
2953 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; in sja1105_port_set_learning()
2966 priv->ucast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2968 priv->ucast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2973 priv->bcast_egress_floods |= BIT(to); in sja1105_port_ucast_bcast_flood()
2975 priv->bcast_egress_floods &= ~BIT(to); in sja1105_port_ucast_bcast_flood()
2989 mutex_lock(&priv->fdb_lock); in sja1105_port_mcast_flood()
2991 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; in sja1105_port_mcast_flood()
2992 l2_lookup = table->entries; in sja1105_port_mcast_flood()
2994 for (match = 0; match < table->entry_count; match++) in sja1105_port_mcast_flood()
2999 if (match == table->entry_count) { in sja1105_port_mcast_flood()
3002 rc = -ENOSPC; in sja1105_port_mcast_flood()
3015 mutex_unlock(&priv->fdb_lock); in sja1105_port_mcast_flood()
3024 struct sja1105_private *priv = ds->priv; in sja1105_port_pre_bridge_flags()
3028 return -EINVAL; in sja1105_port_pre_bridge_flags()
3031 !priv->info->can_limit_mcast_flood) { in sja1105_port_pre_bridge_flags()
3038 return -EINVAL; in sja1105_port_pre_bridge_flags()
3049 struct sja1105_private *priv = ds->priv; in sja1105_port_bridge_flags()
3070 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) { in sja1105_port_bridge_flags()
3080 /* The programming model for the SJA1105 switch is "all-at-once" via static
3094 struct sja1105_private *priv = ds->priv; in sja1105_setup()
3097 if (priv->info->disable_microcontroller) { in sja1105_setup()
3098 rc = priv->info->disable_microcontroller(priv); in sja1105_setup()
3100 dev_err(ds->dev, in sja1105_setup()
3110 dev_err(ds->dev, "Failed to load static config: %d\n", rc); in sja1105_setup()
3115 if (priv->info->clocking_setup) { in sja1105_setup()
3116 rc = priv->info->clocking_setup(priv); in sja1105_setup()
3118 dev_err(ds->dev, in sja1105_setup()
3130 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); in sja1105_setup()
3136 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", in sja1105_setup()
3159 ds->vlan_filtering_is_global = true; in sja1105_setup()
3160 ds->fdb_isolation = true; in sja1105_setup()
3161 ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; in sja1105_setup()
3164 ds->num_tx_queues = SJA1105_NUM_TC; in sja1105_setup()
3166 ds->mtu_enforcement_ingress = true; in sja1105_setup()
3167 ds->assisted_learning_on_cpu_port = true; in sja1105_setup()
3181 sja1105_static_config_free(&priv->static_config); in sja1105_setup()
3188 struct sja1105_private *priv = ds->priv; in sja1105_teardown()
3199 sja1105_static_config_free(&priv->static_config); in sja1105_teardown()
3258 const struct sja1105_regs *regs = priv->info->regs; in sja1105_check_device_id()
3260 struct device *dev = &priv->spidev->dev; in sja1105_check_device_id()
3266 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, in sja1105_check_device_id()
3271 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, in sja1105_check_device_id()
3278 for (match = sja1105_dt_ids; match->compatible[0]; match++) { in sja1105_check_device_id()
3279 const struct sja1105_info *info = match->data; in sja1105_check_device_id()
3282 if (info->device_id != device_id || info->part_no != part_no) in sja1105_check_device_id()
3286 if (priv->info->device_id != device_id || in sja1105_check_device_id()
3287 priv->info->part_no != part_no) { in sja1105_check_device_id()
3289 priv->info->name, info->name); in sja1105_check_device_id()
3291 priv->info = info; in sja1105_check_device_id()
3300 return -ENODEV; in sja1105_check_device_id()
3305 struct device *dev = &spi->dev; in sja1105_probe()
3311 if (!dev->of_node) { in sja1105_probe()
3313 return -EINVAL; in sja1105_probe()
3322 return -ENOMEM; in sja1105_probe()
3327 priv->spidev = spi; in sja1105_probe()
3331 spi->bits_per_word = 8; in sja1105_probe()
3351 /* We need to send at least one 64-bit word of SPI payload per message in sja1105_probe()
3356 return -EINVAL; in sja1105_probe()
3359 priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN; in sja1105_probe()
3360 if (priv->max_xfer_len > max_xfer) in sja1105_probe()
3361 priv->max_xfer_len = max_xfer; in sja1105_probe()
3362 if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER) in sja1105_probe()
3363 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER; in sja1105_probe()
3365 priv->info = of_device_get_match_data(dev); in sja1105_probe()
3374 dev_info(dev, "Probed switch chip: %s\n", priv->info->name); in sja1105_probe()
3378 return -ENOMEM; in sja1105_probe()
3380 ds->dev = dev; in sja1105_probe()
3381 ds->num_ports = priv->info->num_ports; in sja1105_probe()
3382 ds->ops = &sja1105_switch_ops; in sja1105_probe()
3383 ds->phylink_mac_ops = &sja1105_phylink_mac_ops; in sja1105_probe()
3384 ds->priv = priv; in sja1105_probe()
3385 priv->ds = ds; in sja1105_probe()
3387 mutex_init(&priv->ptp_data.lock); in sja1105_probe()
3388 mutex_init(&priv->dynamic_config_lock); in sja1105_probe()
3389 mutex_init(&priv->mgmt_lock); in sja1105_probe()
3390 mutex_init(&priv->fdb_lock); in sja1105_probe()
3391 spin_lock_init(&priv->ts_id_lock); in sja1105_probe()
3395 dev_err(ds->dev, "Failed to parse DT: %d\n", rc); in sja1105_probe()
3400 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, in sja1105_probe()
3403 if (!priv->cbs) in sja1105_probe()
3404 return -ENOMEM; in sja1105_probe()
3407 return dsa_register_switch(priv->ds); in sja1105_probe()
3417 dsa_unregister_switch(priv->ds); in sja1105_remove()
3427 dsa_switch_shutdown(priv->ds); in sja1105_shutdown()
3476 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");