Lines Matching +full:rx +full:-pcs +full:-m
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
30 /* Caller must hold &ocelot->mact_lock */
36 /* Caller must hold &ocelot->mact_lock */
48 /* Caller must hold &ocelot->mact_lock */
90 if (mc_ports & BIT(ocelot->num_phys_ports)) in __ocelot_mact_learn()
109 mutex_lock(&ocelot->mact_lock); in ocelot_mact_learn()
111 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_learn()
122 mutex_lock(&ocelot->mact_lock); in ocelot_mact_forget()
133 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_forget()
145 mutex_lock(&ocelot->mact_lock); in ocelot_mact_lookup()
155 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_lookup()
156 return -ETIMEDOUT; in ocelot_mact_lookup()
162 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_lookup()
165 return -ENOENT; in ocelot_mact_lookup()
182 mutex_lock(&ocelot->mact_lock); in ocelot_mact_learn_streamdata()
193 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_learn_streamdata()
202 * - Do not copy the frame to the CPU extraction queues. in ocelot_mact_init()
203 * - Use the vlan and mac_cpy for dmac lookup. in ocelot_mact_init()
212 * holding &ocelot->mact_lock is pointless. in ocelot_mact_init()
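
A minimal sketch of the MAC table locking convention implied by the lines above: the double-underscore helpers are the raw accessors that require &ocelot->mact_lock, and the public ocelot_mact_*() entry points take the mutex around them (ocelot_mact_init() skips it because nothing else can race at probe time). The body below is a reconstruction from this listing, not verbatim driver code.

    int ocelot_mact_learn(struct ocelot *ocelot, int port,
                          const unsigned char mac[ETH_ALEN],
                          unsigned int vid, enum macaccess_entry_type type)
    {
            int err;

            mutex_lock(&ocelot->mact_lock);
            err = __ocelot_mact_learn(ocelot, port, mac, vid, type);
            mutex_unlock(&ocelot->mact_lock);

            return err;
    }
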
222 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, in ocelot_pll5_init()
225 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, in ocelot_pll5_init()
237 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, in ocelot_pll5_init()
267 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_single_vlan_aware_bridge()
268 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_single_vlan_aware_bridge()
270 if (!ocelot_port || !ocelot_port->bridge || in ocelot_single_vlan_aware_bridge()
271 !br_vlan_enabled(ocelot_port->bridge)) in ocelot_single_vlan_aware_bridge()
275 bridge = ocelot_port->bridge; in ocelot_single_vlan_aware_bridge()
279 if (bridge == ocelot_port->bridge) in ocelot_single_vlan_aware_bridge()
283 "Only one VLAN-aware bridge is supported"); in ocelot_single_vlan_aware_bridge()
284 return -EBUSY; in ocelot_single_vlan_aware_bridge()
325 list_for_each_entry(vlan, &ocelot->vlans, list) { in ocelot_port_num_untagged_vlans()
326 if (!(vlan->portmask & BIT(port))) in ocelot_port_num_untagged_vlans()
331 * the bridge VLANs, which only matter in VLAN-aware mode. in ocelot_port_num_untagged_vlans()
333 if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START) in ocelot_port_num_untagged_vlans()
336 if (vlan->untagged & BIT(port)) in ocelot_port_num_untagged_vlans()
348 list_for_each_entry(vlan, &ocelot->vlans, list) { in ocelot_port_num_tagged_vlans()
349 if (!(vlan->portmask & BIT(port))) in ocelot_port_num_tagged_vlans()
352 if (!(vlan->untagged & BIT(port))) in ocelot_port_num_tagged_vlans()
359 /* We use native VLAN when we have to mix egress-tagged VLANs with exactly
360 * _one_ egress-untagged VLAN (_the_ native VLAN)
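
A rough sketch of that egress-tagging decision, using only the two counting helpers visible in this listing (the string labels are illustrations, not driver symbols):

    /* Sketch: deriving a port's egress tag mode from the VLAN membership
     * counts above; illustration only.
     */
    static const char *example_tag_mode(struct ocelot *ocelot, int port)
    {
            int tagged = ocelot_port_num_tagged_vlans(ocelot, port);
            int untagged = ocelot_port_num_untagged_vlans(ocelot, port);

            if (tagged && untagged == 1)
                    return "native";    /* tag all egress VLANs except the native one */
            if (tagged)
                    return "trunk";     /* tag every VLAN on egress */
            return "untag-all";         /* pop the tag on egress */
    }
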
373 list_for_each_entry(vlan, &ocelot->vlans, list) in ocelot_port_find_native_vlan()
374 if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port)) in ocelot_port_find_native_vlan()
386 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_manage_port_tag()
390 if (ocelot_port->vlan_aware) { in ocelot_port_manage_port_tag()
417 REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), in ocelot_port_manage_port_tag()
428 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_bridge_num_find()
429 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_bridge_num_find()
431 if (ocelot_port && ocelot_port->bridge == bridge) in ocelot_bridge_num_find()
432 return ocelot_port->bridge_num; in ocelot_bridge_num_find()
435 return -1; in ocelot_bridge_num_find()
452 /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */ in ocelot_vlan_unaware_pvid()
453 return VLAN_N_VID - bridge_num - 1; in ocelot_vlan_unaware_pvid()
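
A quick worked example of that formula, as standalone C (VLAN_N_VID is 4096 in include/linux/if_vlan.h, so bridge_num 0, 1, 2 map to reserved PVIDs 4095, 4094, 4093):

    #include <stdio.h>

    #define VLAN_N_VID 4096  /* value from include/linux/if_vlan.h */

    static unsigned int vlan_unaware_pvid(unsigned int bridge_num)
    {
            return VLAN_N_VID - bridge_num - 1;  /* 4095 downwards */
    }

    int main(void)
    {
            for (unsigned int i = 0; i < 3; i++)
                    printf("bridge_num %u -> PVID %u\n", i, vlan_unaware_pvid(i));
            return 0;
    }
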
457 * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
462 * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
463 * component conformance" suggest that a C-VLAN component should only recognize
464 * and filter on C-Tags, and an S-VLAN component should only recognize and
465 * process based on S-Tags.
467 * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
469 * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
472 * design is non-conformant, because the switch assigns each frame to a VLAN
476 * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
492 * In the VLAN Table, the TAG_TYPE information is not accessible - just the
493 * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
494 * C-VLAN X, and S-VLAN X.
497 * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
501 * should be treated as 802.1Q-untagged, and classified to the PVID of that
510 * if those packets were processed as VLAN-untagged.
514 * VLAN-unaware.
519 struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1]; in ocelot_update_vlan_reclassify_rule()
520 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_update_vlan_reclassify_rule()
527 pvid_vlan = ocelot_port->pvid_vlan; in ocelot_update_vlan_reclassify_rule()
528 vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan; in ocelot_update_vlan_reclassify_rule()
547 /* Treating as VLAN-untagged means using as classified VID equal to in ocelot_update_vlan_reclassify_rule()
550 vid = pvid_vlan->vid; in ocelot_update_vlan_reclassify_rule()
559 if (filter->action.vid != vid) { in ocelot_update_vlan_reclassify_rule()
560 filter->action.vid = vid; in ocelot_update_vlan_reclassify_rule()
563 if (filter->action.pcp != pcp) { in ocelot_update_vlan_reclassify_rule()
564 filter->action.pcp = pcp; in ocelot_update_vlan_reclassify_rule()
567 if (filter->action.dei != dei) { in ocelot_update_vlan_reclassify_rule()
568 filter->action.dei = dei; in ocelot_update_vlan_reclassify_rule()
581 return -ENOMEM; in ocelot_update_vlan_reclassify_rule()
583 filter->key_type = OCELOT_VCAP_KEY_ANY; in ocelot_update_vlan_reclassify_rule()
584 filter->ingress_port_mask = BIT(port); in ocelot_update_vlan_reclassify_rule()
585 filter->vlan.tpid = OCELOT_VCAP_BIT_1; in ocelot_update_vlan_reclassify_rule()
586 filter->prio = 1; in ocelot_update_vlan_reclassify_rule()
587 filter->id.cookie = cookie; in ocelot_update_vlan_reclassify_rule()
588 filter->id.tc_offload = false; in ocelot_update_vlan_reclassify_rule()
589 filter->block_id = VCAP_IS1; in ocelot_update_vlan_reclassify_rule()
590 filter->type = OCELOT_VCAP_FILTER_OFFLOAD; in ocelot_update_vlan_reclassify_rule()
591 filter->lookup = 0; in ocelot_update_vlan_reclassify_rule()
592 filter->action.vid_replace_ena = true; in ocelot_update_vlan_reclassify_rule()
593 filter->action.pcp_dei_ena = true; in ocelot_update_vlan_reclassify_rule()
594 filter->action.vid = vid; in ocelot_update_vlan_reclassify_rule()
595 filter->action.pcp = pcp; in ocelot_update_vlan_reclassify_rule()
596 filter->action.dei = dei; in ocelot_update_vlan_reclassify_rule()
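
Collecting the filter fields scattered above into one place, the reclassification entry installed per VLAN-aware port with a PVID amounts to roughly the following initializer (a sketch assembled from this listing; field names assume the layout of struct ocelot_vcap_filter, and port/pvid_vlan/pcp/dei stand for the per-port values computed above):

    struct ocelot_vcap_filter reclassify = {
            .key_type          = OCELOT_VCAP_KEY_ANY,
            .ingress_port_mask = BIT(port),
            .vlan.tpid         = OCELOT_VCAP_BIT_1,  /* match S-tagged frames */
            .prio              = 1,
            .block_id          = VCAP_IS1,
            .lookup            = 0,
            .action = {
                    .vid_replace_ena = true,         /* reclassify to the PVID */
                    .pcp_dei_ena     = true,
                    .vid             = pvid_vlan->vid,
                    .pcp             = pcp,          /* port-default PCP/DEI */
                    .dei             = dei,
            },
    };
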
609 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_pvid()
610 u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); in ocelot_port_set_pvid()
613 ocelot_port->pvid_vlan = pvid_vlan; in ocelot_port_set_pvid()
615 if (ocelot_port->vlan_aware && pvid_vlan) in ocelot_port_set_pvid()
616 pvid = pvid_vlan->vid; in ocelot_port_set_pvid()
625 * classified to VLAN 0, but that is always in our RX filter, so it in ocelot_port_set_pvid()
629 * 802.1ad-tagged frames (carrying S-Tags) should be considered in ocelot_port_set_pvid()
630 * 802.1Q-untagged, and also dropped. in ocelot_port_set_pvid()
632 if (!pvid_vlan && ocelot_port->vlan_aware) in ocelot_port_set_pvid()
651 list_for_each_entry(vlan, &ocelot->vlans, list) in ocelot_bridge_vlan_find()
652 if (vlan->vid == vid) in ocelot_bridge_vlan_find()
666 portmask = vlan->portmask | BIT(port); in ocelot_vlan_member_add()
672 vlan->portmask = portmask; in ocelot_vlan_member_add()
674 * egress-tagging setting, so make sure to override an untagged in ocelot_vlan_member_add()
678 vlan->untagged |= BIT(port); in ocelot_vlan_member_add()
680 vlan->untagged &= ~BIT(port); in ocelot_vlan_member_add()
687 return -ENOMEM; in ocelot_vlan_member_add()
697 vlan->vid = vid; in ocelot_vlan_member_add()
698 vlan->portmask = portmask; in ocelot_vlan_member_add()
700 vlan->untagged = BIT(port); in ocelot_vlan_member_add()
701 INIT_LIST_HEAD(&vlan->list); in ocelot_vlan_member_add()
702 list_add_tail(&vlan->list, &ocelot->vlans); in ocelot_vlan_member_add()
716 portmask = vlan->portmask & ~BIT(port); in ocelot_vlan_member_del()
722 vlan->portmask = portmask; in ocelot_vlan_member_del()
723 if (vlan->portmask) in ocelot_vlan_member_del()
726 list_del(&vlan->list); in ocelot_vlan_member_del()
751 struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; in ocelot_port_vlan_filtering()
752 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_vlan_filtering()
757 list_for_each_entry(filter, &block->rules, list) { in ocelot_port_vlan_filtering()
758 if (filter->ingress_port_mask & BIT(port) && in ocelot_port_vlan_filtering()
759 filter->action.vid_replace_ena) { in ocelot_port_vlan_filtering()
762 return -EBUSY; in ocelot_port_vlan_filtering()
772 ocelot_port->bridge); in ocelot_port_vlan_filtering()
773 else if (ocelot_port->bridge) in ocelot_port_vlan_filtering()
775 ocelot_port->bridge); in ocelot_port_vlan_filtering()
779 ocelot_port->vlan_aware = vlan_aware; in ocelot_port_vlan_filtering()
791 err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); in ocelot_port_vlan_filtering()
805 /* We are adding an egress-untagged VLAN */ in ocelot_vlan_prepare()
808 "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); in ocelot_vlan_prepare()
809 return -EBUSY; in ocelot_vlan_prepare()
812 /* We are adding an egress-tagged VLAN */ in ocelot_vlan_prepare()
815 "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); in ocelot_vlan_prepare()
816 return -EBUSY; in ocelot_vlan_prepare()
822 "VLAN range 4000-4095 reserved for VLAN-unaware bridging"); in ocelot_vlan_prepare()
823 return -EBUSY; in ocelot_vlan_prepare()
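
The checks above reconstruct to roughly the following (a sketch; the native-VLAN test is expressed with the counting helpers from this listing, and the reserved-range comparison is an assumption about where the 4000-4095 window starts):

    static int example_vlan_prepare(struct ocelot *ocelot, int port, u16 vid,
                                    bool untagged,
                                    struct netlink_ext_ack *extack)
    {
            bool uses_native_vlan =
                    ocelot_port_num_tagged_vlans(ocelot, port) &&
                    ocelot_port_num_untagged_vlans(ocelot, port) == 1;

            if (untagged && uses_native_vlan) {
                    /* Adding a second egress-untagged VLAN next to tagged ones */
                    NL_SET_ERR_MSG_MOD(extack,
                                       "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
                    return -EBUSY;
            }

            if (!untagged && ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
                    NL_SET_ERR_MSG_MOD(extack,
                                       "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
                    return -EBUSY;
            }

            if (vid >= OCELOT_RSV_VLAN_RANGE_START) {  /* 4000-4095 reserved */
                    NL_SET_ERR_MSG_MOD(extack,
                                       "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
                    return -EBUSY;
            }

            return 0;
    }
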
833 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_vlan_add()
836 /* Ignore VID 0 added to our RX filter by the 8021q module, since in ocelot_vlan_add()
838 * egress-untagged to egress-tagged. in ocelot_vlan_add()
853 } else if (ocelot_port->pvid_vlan && in ocelot_vlan_add()
854 ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { in ocelot_vlan_add()
869 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_vlan_del()
876 if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) in ocelot_vlan_del()
899 unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); in ocelot_vlan_init()
922 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_vlan_init()
953 * Worst-case delays for 10 kilobyte jumbo frames are: in ocelot_port_flush()
954 * 8 ms on a 10M port in ocelot_port_flush()
955 * 800 μs on a 100M port in ocelot_port_flush()
980 /* Re-enable flow control */ in ocelot_port_flush()
989 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_configure_serdes()
990 struct device *dev = ocelot->dev; in ocelot_port_configure_serdes()
994 if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_QSGMII) in ocelot_port_configure_serdes()
1000 if (ocelot_port->phy_mode != PHY_INTERFACE_MODE_INTERNAL) { in ocelot_port_configure_serdes()
1012 ocelot_port->phy_mode); in ocelot_port_configure_serdes()
1029 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_config()
1040 /* Enable PCS */ in ocelot_phylink_mac_config()
1056 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_link_down()
1059 ocelot_port->speed = SPEED_UNKNOWN; in ocelot_phylink_mac_link_down()
1064 if (ocelot->ops->cut_through_fwd) { in ocelot_phylink_mac_link_down()
1065 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_down()
1066 ocelot->ops->cut_through_fwd(ocelot); in ocelot_phylink_mac_link_down()
1067 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_down()
1074 dev_err(ocelot->dev, "failed to flush port %d: %d\n", in ocelot_phylink_mac_link_down()
1097 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_link_up()
1101 ocelot_port->speed = speed; in ocelot_phylink_mac_link_up()
1104 * and it's the PCS who is performing the rate adaptation, so we have in ocelot_phylink_mac_link_up()
1144 dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", in ocelot_phylink_mac_link_up()
1166 if (port != ocelot->npi) in ocelot_phylink_mac_link_up()
1176 /* If the port supports cut-through forwarding, update the masks before in ocelot_phylink_mac_link_up()
1179 if (ocelot->ops->cut_through_fwd) { in ocelot_phylink_mac_link_up()
1180 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_up()
1181 /* Workaround for hardware bug - FP doesn't work in ocelot_phylink_mac_link_up()
1183 * below also calls ocelot->ops->cut_through_fwd(), in ocelot_phylink_mac_link_up()
1187 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_up()
1204 return -EIO; in ocelot_rx_frame_word()
1213 return -EIO; in ocelot_rx_frame_word()
1245 return (err < 0) ? err : -EIO; in ocelot_xtr_poll_xfh()
1258 ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); in ocelot_ptp_rx_timestamp()
1262 full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | in ocelot_ptp_rx_timestamp()
1270 shhwtstamps->hwtstamp = full_ts_in_ns; in ocelot_ptp_rx_timestamp()
1275 __acquires(&ocelot->inj_lock) in ocelot_lock_inj_grp()
1277 spin_lock(&ocelot->inj_lock); in ocelot_lock_inj_grp()
1282 __releases(&ocelot->inj_lock) in ocelot_unlock_inj_grp()
1284 spin_unlock(&ocelot->inj_lock); in ocelot_unlock_inj_grp()
1289 __acquires(&ocelot->inj_lock) in ocelot_lock_xtr_grp()
1291 spin_lock(&ocelot->inj_lock); in ocelot_lock_xtr_grp()
1296 __releases(&ocelot->inj_lock) in ocelot_unlock_xtr_grp()
1298 spin_unlock(&ocelot->inj_lock); in ocelot_unlock_xtr_grp()
1303 __acquires(&ocelot->xtr_lock) in ocelot_lock_xtr_grp_bh()
1305 spin_lock_bh(&ocelot->xtr_lock); in ocelot_lock_xtr_grp_bh()
1310 __releases(&ocelot->xtr_lock) in ocelot_unlock_xtr_grp_bh()
1312 spin_unlock_bh(&ocelot->xtr_lock); in ocelot_unlock_xtr_grp_bh()
1326 lockdep_assert_held(&ocelot->xtr_lock); in ocelot_xtr_poll_frame()
1336 if (WARN_ON(src_port >= ocelot->num_phys_ports)) in ocelot_xtr_poll_frame()
1337 return -EINVAL; in ocelot_xtr_poll_frame()
1339 dev = ocelot->ops->port_to_netdev(ocelot, src_port); in ocelot_xtr_poll_frame()
1341 return -EINVAL; in ocelot_xtr_poll_frame()
1346 return -ENOMEM; in ocelot_xtr_poll_frame()
1349 buf_len = len - ETH_FCS_LEN; in ocelot_xtr_poll_frame()
1371 len -= ETH_FCS_LEN - sz; in ocelot_xtr_poll_frame()
1373 if (unlikely(dev->features & NETIF_F_RXFCS)) { in ocelot_xtr_poll_frame()
1378 if (ocelot->ptp) in ocelot_xtr_poll_frame()
1384 if (ocelot->ports[src_port]->bridge) in ocelot_xtr_poll_frame()
1385 skb->offload_fwd_mark = 1; in ocelot_xtr_poll_frame()
1387 skb->protocol = eth_type_trans(skb, dev); in ocelot_xtr_poll_frame()
1403 lockdep_assert_held(&ocelot->inj_lock); in ocelot_can_inject()
1415 * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
1428 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_ifh_set_basic()
1429 struct net_device *dev = skb->dev; in ocelot_ifh_set_basic()
1433 ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci, in ocelot_ifh_set_basic()
1437 netdev_get_prio_tc_map(dev, skb->priority) : skb->priority; in ocelot_ifh_set_basic()
1441 ocelot_ifh_set_src(ifh, ocelot->num_phys_ports); in ocelot_ifh_set_basic()
1457 lockdep_assert_held(&ocelot->inj_lock); in ocelot_port_inject_frame()
1467 count = DIV_ROUND_UP(skb->len, 4); in ocelot_port_inject_frame()
1468 last = skb->len % 4; in ocelot_port_inject_frame()
1470 ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); in ocelot_port_inject_frame()
1480 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | in ocelot_port_inject_frame()
1488 skb->dev->stats.tx_packets++; in ocelot_port_inject_frame()
1489 skb->dev->stats.tx_bytes += skb->len; in ocelot_port_inject_frame()
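
Pieced together from the injection fragments above, a caller of the register-based injection path looks roughly like this (a sketch, not verbatim driver code; rew_op 0 meaning "no rewriter operation" and the omission of PTP and skb-freeing details are simplifications):

    static netdev_tx_t example_xmit(struct ocelot *ocelot, int port, int grp,
                                    struct sk_buff *skb)
    {
            ocelot_lock_inj_grp(ocelot, grp);

            if (!ocelot_can_inject(ocelot, grp)) {
                    ocelot_unlock_inj_grp(ocelot, grp);
                    return NETDEV_TX_BUSY;  /* QS_INJ group has no room right now */
            }

            /* Builds the IFH (via ocelot_ifh_set_basic()) and writes the frame
             * words into the QS_INJ registers, updating tx stats as above.
             */
            ocelot_port_inject_frame(ocelot, port, grp, 0, skb);

            ocelot_unlock_inj_grp(ocelot, grp);

            return NETDEV_TX_OK;
    }
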
1495 lockdep_assert_held(&ocelot->xtr_lock); in ocelot_drain_cpu_queue()
1522 /* Caller must hold &ocelot->mact_lock */
1539 return -ETIMEDOUT; in ocelot_mact_read()
1544 return -EINVAL; in ocelot_mact_read()
1551 return -EINVAL; in ocelot_mact_read()
1564 entry->vid = (mach >> 16) & 0xfff; in ocelot_mact_read()
1565 ether_addr_copy(entry->mac, mac); in ocelot_mact_read()
1574 mutex_lock(&ocelot->mact_lock); in ocelot_mact_flush()
1587 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_flush()
1601 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_flush()
1616 mutex_lock(&ocelot->mact_lock); in ocelot_fdb_dump()
1619 for (i = 0; i < ocelot->num_mact_rows; i++) { in ocelot_fdb_dump()
1628 if (err == -EINVAL) in ocelot_fdb_dump()
1636 * VLAN-unaware bridging. in ocelot_fdb_dump()
1647 mutex_unlock(&ocelot->mact_lock); in ocelot_fdb_dump()
1662 block_vcap_is2 = &ocelot->block[VCAP_IS2]; in ocelot_trap_add()
1669 return -ENOMEM; in ocelot_trap_add()
1672 trap->prio = 1; in ocelot_trap_add()
1673 trap->id.cookie = cookie; in ocelot_trap_add()
1674 trap->id.tc_offload = false; in ocelot_trap_add()
1675 trap->block_id = VCAP_IS2; in ocelot_trap_add()
1676 trap->type = OCELOT_VCAP_FILTER_OFFLOAD; in ocelot_trap_add()
1677 trap->lookup = 0; in ocelot_trap_add()
1678 trap->action.cpu_copy_ena = true; in ocelot_trap_add()
1679 trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; in ocelot_trap_add()
1680 trap->action.port_mask = 0; in ocelot_trap_add()
1681 trap->take_ts = take_ts; in ocelot_trap_add()
1682 trap->is_trap = true; in ocelot_trap_add()
1686 trap->ingress_port_mask |= BIT(port); in ocelot_trap_add()
1693 trap->ingress_port_mask &= ~BIT(port); in ocelot_trap_add()
1694 if (!trap->ingress_port_mask) in ocelot_trap_add()
1707 block_vcap_is2 = &ocelot->block[VCAP_IS2]; in ocelot_trap_del()
1714 trap->ingress_port_mask &= ~BIT(port); in ocelot_trap_del()
1715 if (!trap->ingress_port_mask) in ocelot_trap_del()
1726 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_get_bond_mask()
1728 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_get_bond_mask()
1729 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_get_bond_mask()
1734 if (ocelot_port->bond == bond) in ocelot_get_bond_mask()
1749 return -ENOENT; in ocelot_bond_get_id()
1771 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_dsa_8021q_cpu_assigned_ports()
1772 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_dsa_8021q_cpu_assigned_ports()
1777 if (ocelot_port->dsa_8021q_cpu == cpu) in ocelot_dsa_8021q_cpu_assigned_ports()
1781 if (cpu->bond) in ocelot_dsa_8021q_cpu_assigned_ports()
1782 mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond); in ocelot_dsa_8021q_cpu_assigned_ports()
1792 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_assigned_dsa_8021q_cpu_mask()
1793 struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu; in ocelot_port_assigned_dsa_8021q_cpu_mask()
1798 if (cpu_port->bond) in ocelot_port_assigned_dsa_8021q_cpu_mask()
1799 return ocelot_get_bond_mask(ocelot, cpu_port->bond); in ocelot_port_assigned_dsa_8021q_cpu_mask()
1801 return BIT(cpu_port->index); in ocelot_port_assigned_dsa_8021q_cpu_mask()
1807 struct ocelot_port *ocelot_port = ocelot->ports[src_port]; in ocelot_get_bridge_fwd_mask()
1812 if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) in ocelot_get_bridge_fwd_mask()
1815 bridge = ocelot_port->bridge; in ocelot_get_bridge_fwd_mask()
1819 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_get_bridge_fwd_mask()
1820 ocelot_port = ocelot->ports[port]; in ocelot_get_bridge_fwd_mask()
1825 if (ocelot_port->stp_state == BR_STATE_FORWARDING && in ocelot_get_bridge_fwd_mask()
1826 ocelot_port->bridge == bridge) in ocelot_get_bridge_fwd_mask()
1838 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_apply_bridge_fwd_mask()
1840 /* If cut-through forwarding is supported, update the masks before a in ocelot_apply_bridge_fwd_mask()
1844 if (joining && ocelot->ops->cut_through_fwd) in ocelot_apply_bridge_fwd_mask()
1845 ocelot->ops->cut_through_fwd(ocelot); in ocelot_apply_bridge_fwd_mask()
1850 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_apply_bridge_fwd_mask()
1851 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_apply_bridge_fwd_mask()
1857 } else if (ocelot_port->is_dsa_8021q_cpu) { in ocelot_apply_bridge_fwd_mask()
1863 } else if (ocelot_port->bridge) { in ocelot_apply_bridge_fwd_mask()
1864 struct net_device *bond = ocelot_port->bond; in ocelot_apply_bridge_fwd_mask()
1886 /* If cut-through forwarding is supported and a port is leaving, there in ocelot_apply_bridge_fwd_mask()
1887 * is a chance that cut-through was disabled on the other ports due to in ocelot_apply_bridge_fwd_mask()
1889 * update the cut-through masks of the remaining ports no earlier than in ocelot_apply_bridge_fwd_mask()
1891 * the cut-through update and the forwarding domain update. in ocelot_apply_bridge_fwd_mask()
1893 if (!joining && ocelot->ops->cut_through_fwd) in ocelot_apply_bridge_fwd_mask()
1894 ocelot->ops->cut_through_fwd(ocelot); in ocelot_apply_bridge_fwd_mask()
1909 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_update_pgid_cpu()
1910 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_update_pgid_cpu()
1912 if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu) in ocelot_update_pgid_cpu()
1919 pgid_cpu = BIT(ocelot->num_phys_ports); in ocelot_update_pgid_cpu()
1926 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_setup_dsa_8021q_cpu()
1929 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_setup_dsa_8021q_cpu()
1931 cpu_port->is_dsa_8021q_cpu = true; in ocelot_port_setup_dsa_8021q_cpu()
1938 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_setup_dsa_8021q_cpu()
1944 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_teardown_dsa_8021q_cpu()
1947 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_teardown_dsa_8021q_cpu()
1949 cpu_port->is_dsa_8021q_cpu = false; in ocelot_port_teardown_dsa_8021q_cpu()
1952 ocelot_vlan_member_del(ocelot, cpu_port->index, vid); in ocelot_port_teardown_dsa_8021q_cpu()
1956 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_teardown_dsa_8021q_cpu()
1963 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_assign_dsa_8021q_cpu()
1965 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_assign_dsa_8021q_cpu()
1967 ocelot->ports[port]->dsa_8021q_cpu = cpu_port; in ocelot_port_assign_dsa_8021q_cpu()
1970 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_assign_dsa_8021q_cpu()
1976 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_unassign_dsa_8021q_cpu()
1978 ocelot->ports[port]->dsa_8021q_cpu = NULL; in ocelot_port_unassign_dsa_8021q_cpu()
1981 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_unassign_dsa_8021q_cpu()
1987 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_bridge_stp_state_set()
1990 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_bridge_stp_state_set()
1992 ocelot_port->stp_state = state; in ocelot_bridge_stp_state_set()
1995 ocelot_port->learn_ena) in ocelot_bridge_stp_state_set()
2003 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_bridge_stp_state_set()
2027 list_for_each_entry(mc, &ocelot->multicast, list) { in ocelot_multicast_get()
2028 if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) in ocelot_multicast_get()
2051 return ERR_PTR(-ENOMEM); in ocelot_pgid_alloc()
2053 pgid->ports = ports; in ocelot_pgid_alloc()
2054 pgid->index = index; in ocelot_pgid_alloc()
2055 refcount_set(&pgid->refcount, 1); in ocelot_pgid_alloc()
2056 list_add_tail(&pgid->list, &ocelot->pgids); in ocelot_pgid_alloc()
2063 if (!refcount_dec_and_test(&pgid->refcount)) in ocelot_pgid_free()
2066 list_del(&pgid->list); in ocelot_pgid_free()
2081 if (mc->entry_type == ENTRYTYPE_MACv4 || in ocelot_mdb_get_pgid()
2082 mc->entry_type == ENTRYTYPE_MACv6) in ocelot_mdb_get_pgid()
2083 return ocelot_pgid_alloc(ocelot, 0, mc->ports); in ocelot_mdb_get_pgid()
2085 list_for_each_entry(pgid, &ocelot->pgids, list) { in ocelot_mdb_get_pgid()
2089 if (pgid->index && pgid->ports == mc->ports) { in ocelot_mdb_get_pgid()
2090 refcount_inc(&pgid->refcount); in ocelot_mdb_get_pgid()
2099 list_for_each_entry(pgid, &ocelot->pgids, list) { in ocelot_mdb_get_pgid()
2100 if (pgid->index == index) { in ocelot_mdb_get_pgid()
2107 return ocelot_pgid_alloc(ocelot, index, mc->ports); in ocelot_mdb_get_pgid()
2110 return ERR_PTR(-ENOSPC); in ocelot_mdb_get_pgid()
2116 ether_addr_copy(addr, mc->addr); in ocelot_encode_ports_to_mdb()
2118 if (mc->entry_type == ENTRYTYPE_MACv4) { in ocelot_encode_ports_to_mdb()
2120 addr[1] = mc->ports >> 8; in ocelot_encode_ports_to_mdb()
2121 addr[2] = mc->ports & 0xff; in ocelot_encode_ports_to_mdb()
2122 } else if (mc->entry_type == ENTRYTYPE_MACv6) { in ocelot_encode_ports_to_mdb()
2123 addr[0] = mc->ports >> 8; in ocelot_encode_ports_to_mdb()
2124 addr[1] = mc->ports & 0xff; in ocelot_encode_ports_to_mdb()
2135 u16 vid = mdb->vid; in ocelot_port_mdb_add()
2140 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); in ocelot_port_mdb_add()
2143 mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); in ocelot_port_mdb_add()
2145 return -ENOMEM; in ocelot_port_mdb_add()
2147 mc->entry_type = ocelot_classify_mdb(mdb->addr); in ocelot_port_mdb_add()
2148 ether_addr_copy(mc->addr, mdb->addr); in ocelot_port_mdb_add()
2149 mc->vid = vid; in ocelot_port_mdb_add()
2151 list_add_tail(&mc->list, &ocelot->multicast); in ocelot_port_mdb_add()
2156 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_port_mdb_add()
2161 mc->ports |= BIT(port); in ocelot_port_mdb_add()
2165 dev_err(ocelot->dev, in ocelot_port_mdb_add()
2167 mc->addr, mc->vid); in ocelot_port_mdb_add()
2168 devm_kfree(ocelot->dev, mc); in ocelot_port_mdb_add()
2171 mc->pgid = pgid; in ocelot_port_mdb_add()
2175 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_port_mdb_add()
2176 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_port_mdb_add()
2177 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_port_mdb_add()
2178 pgid->index); in ocelot_port_mdb_add()
2180 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_port_mdb_add()
2181 mc->entry_type); in ocelot_port_mdb_add()
2192 u16 vid = mdb->vid; in ocelot_port_mdb_del()
2197 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); in ocelot_port_mdb_del()
2199 return -ENOENT; in ocelot_port_mdb_del()
2204 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_port_mdb_del()
2205 mc->ports &= ~BIT(port); in ocelot_port_mdb_del()
2206 if (!mc->ports) { in ocelot_port_mdb_del()
2207 list_del(&mc->list); in ocelot_port_mdb_del()
2208 devm_kfree(ocelot->dev, mc); in ocelot_port_mdb_del()
2216 mc->pgid = pgid; in ocelot_port_mdb_del()
2220 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_port_mdb_del()
2221 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_port_mdb_del()
2222 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_port_mdb_del()
2223 pgid->index); in ocelot_port_mdb_del()
2225 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_port_mdb_del()
2226 mc->entry_type); in ocelot_port_mdb_del()
2234 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_bridge_join()
2241 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_join()
2243 ocelot_port->bridge = bridge; in ocelot_port_bridge_join()
2244 ocelot_port->bridge_num = bridge_num; in ocelot_port_bridge_join()
2248 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_join()
2260 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_bridge_leave()
2262 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_leave()
2267 ocelot_port->bridge = NULL; in ocelot_port_bridge_leave()
2268 ocelot_port->bridge_num = -1; in ocelot_port_bridge_leave()
2274 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_leave()
2280 unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0); in ocelot_set_aggr_pgids()
2288 ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), in ocelot_set_aggr_pgids()
2299 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_set_aggr_pgids()
2300 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2302 if (!ocelot_port || !ocelot_port->bond) in ocelot_set_aggr_pgids()
2309 for (lag = 0; lag < ocelot->num_phys_ports; lag++) { in ocelot_set_aggr_pgids()
2310 struct net_device *bond = ocelot->ports[lag]->bond; in ocelot_set_aggr_pgids()
2320 for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { in ocelot_set_aggr_pgids()
2321 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2327 if (ocelot_port->lag_tx_active) in ocelot_set_aggr_pgids()
2347 for (port = lag; port < ocelot->num_phys_ports; port++) { in ocelot_set_aggr_pgids()
2348 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2353 if (ocelot_port->bond == bond) in ocelot_set_aggr_pgids()
2368 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_setup_logical_port_ids()
2369 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_setup_logical_port_ids()
2375 bond = ocelot_port->bond; in ocelot_setup_logical_port_ids()
2397 u16 vid = mc->vid; in ocelot_migrate_mc()
2399 dev_dbg(ocelot->dev, in ocelot_migrate_mc()
2401 mc->addr, mc->vid, from_mask, to_mask); in ocelot_migrate_mc()
2406 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_migrate_mc()
2410 mc->ports &= ~from_mask; in ocelot_migrate_mc()
2411 mc->ports |= to_mask; in ocelot_migrate_mc()
2415 dev_err(ocelot->dev, in ocelot_migrate_mc()
2417 mc->addr, mc->vid); in ocelot_migrate_mc()
2418 devm_kfree(ocelot->dev, mc); in ocelot_migrate_mc()
2421 mc->pgid = pgid; in ocelot_migrate_mc()
2425 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_migrate_mc()
2426 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_migrate_mc()
2427 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_migrate_mc()
2428 pgid->index); in ocelot_migrate_mc()
2430 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_migrate_mc()
2431 mc->entry_type); in ocelot_migrate_mc()
2440 list_for_each_entry(mc, &ocelot->multicast, list) { in ocelot_migrate_mdbs()
2441 if (!(mc->ports & from_mask)) in ocelot_migrate_mdbs()
2470 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_migrate_lag_fdbs()
2472 list_for_each_entry(fdb, &ocelot->lag_fdbs, list) { in ocelot_migrate_lag_fdbs()
2473 if (fdb->bond != bond) in ocelot_migrate_lag_fdbs()
2476 err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid); in ocelot_migrate_lag_fdbs()
2478 dev_err(ocelot->dev, in ocelot_migrate_lag_fdbs()
2480 bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); in ocelot_migrate_lag_fdbs()
2483 err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid, in ocelot_migrate_lag_fdbs()
2486 dev_err(ocelot->dev, in ocelot_migrate_lag_fdbs()
2488 bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); in ocelot_migrate_lag_fdbs()
2498 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { in ocelot_port_lag_join()
2501 return -EOPNOTSUPP; in ocelot_port_lag_join()
2504 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_join()
2506 ocelot->ports[port]->bond = bond; in ocelot_port_lag_join()
2512 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_join()
2523 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_leave()
2527 ocelot->ports[port]->bond = NULL; in ocelot_port_lag_leave()
2538 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_leave()
2544 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_lag_change()
2546 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_change()
2548 ocelot_port->lag_tx_active = lag_tx_active; in ocelot_port_lag_change()
2553 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_change()
2566 return -ENOMEM; in ocelot_lag_fdb_add()
2568 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2573 ether_addr_copy(fdb->addr, addr); in ocelot_lag_fdb_add()
2574 fdb->vid = vid; in ocelot_lag_fdb_add()
2575 fdb->bond = bond; in ocelot_lag_fdb_add()
2581 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2586 list_add_tail(&fdb->list, &ocelot->lag_fdbs); in ocelot_lag_fdb_add()
2587 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2599 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2604 list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) { in ocelot_lag_fdb_del()
2605 if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid || in ocelot_lag_fdb_del()
2606 fdb->bond != bond) in ocelot_lag_fdb_del()
2610 list_del(&fdb->list); in ocelot_lag_fdb_del()
2611 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2617 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2619 return -ENOENT; in ocelot_lag_fdb_del()
2623 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
2631 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_maxlen()
2636 if (port == ocelot->npi) { in ocelot_port_set_maxlen()
2639 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) in ocelot_port_set_maxlen()
2641 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) in ocelot_port_set_maxlen()
2656 atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / in ocelot_port_set_maxlen()
2659 ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); in ocelot_port_set_maxlen()
2660 ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG); in ocelot_port_set_maxlen()
2666 int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN; in ocelot_get_max_mtu()
2668 if (port == ocelot->npi) { in ocelot_get_max_mtu()
2669 max_mtu -= OCELOT_TAG_LEN; in ocelot_get_max_mtu()
2671 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) in ocelot_get_max_mtu()
2672 max_mtu -= OCELOT_SHORT_PREFIX_LEN; in ocelot_get_max_mtu()
2673 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) in ocelot_get_max_mtu()
2674 max_mtu -= OCELOT_LONG_PREFIX_LEN; in ocelot_get_max_mtu()
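
A worked example of that arithmetic as standalone C; the concrete sizes (ETH_HLEN = 14, ETH_FCS_LEN = 4, and 16/4/16 bytes for the Ocelot tag and the short/long NPI prefixes) are assumptions to be checked against the driver headers:

    #include <stdio.h>

    int main(void)
    {
            int max_mtu = 65535 - 14 - 4;          /* 65517 on a regular port */

            printf("regular port:       %d\n", max_mtu);
            printf("NPI, no prefix:     %d\n", max_mtu - 16);       /* 65501 */
            printf("NPI, short prefix:  %d\n", max_mtu - 16 - 4);   /* 65497 */
            printf("NPI, long prefix:   %d\n", max_mtu - 16 - 16);  /* 65485 */
            return 0;
    }
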
2684 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_learning()
2693 ocelot_port->learn_ena = enabled; in ocelot_port_set_learning()
2736 return -EINVAL; in ocelot_port_pre_bridge_flags()
2774 return -ERANGE; in ocelot_port_set_default_prio()
2793 return -EOPNOTSUPP; in ocelot_port_get_dscp_prio()
2797 /* Re-read ANA_DSCP_CFG for the translated DSCP */ in ocelot_port_get_dscp_prio()
2802 * to VLAN PCP or port-based default. in ocelot_port_get_dscp_prio()
2805 return -EOPNOTSUPP; in ocelot_port_get_dscp_prio()
2816 return -ERANGE; in ocelot_port_add_dscp_prio()
2882 struct ocelot_mirror *m = ocelot->mirror; in ocelot_mirror_get() local
2884 if (m) { in ocelot_mirror_get()
2885 if (m->to != to) { in ocelot_mirror_get()
2888 return ERR_PTR(-EBUSY); in ocelot_mirror_get()
2891 refcount_inc(&m->refcount); in ocelot_mirror_get()
2892 return m; in ocelot_mirror_get()
2895 m = kzalloc(sizeof(*m), GFP_KERNEL); in ocelot_mirror_get()
2896 if (!m) in ocelot_mirror_get()
2897 return ERR_PTR(-ENOMEM); in ocelot_mirror_get()
2899 m->to = to; in ocelot_mirror_get()
2900 refcount_set(&m->refcount, 1); in ocelot_mirror_get()
2901 ocelot->mirror = m; in ocelot_mirror_get()
2906 return m; in ocelot_mirror_get()
2911 struct ocelot_mirror *m = ocelot->mirror; in ocelot_mirror_put() local
2913 if (!refcount_dec_and_test(&m->refcount)) in ocelot_mirror_put()
2917 ocelot->mirror = NULL; in ocelot_mirror_put()
2918 kfree(m); in ocelot_mirror_put()
2924 struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack); in ocelot_port_mirror_add() local
2926 if (IS_ERR(m)) in ocelot_port_mirror_add()
2927 return PTR_ERR(m); in ocelot_port_mirror_add()
2957 struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); in ocelot_port_reset_mqprio()
2966 struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); in ocelot_port_mqprio()
2967 struct netlink_ext_ack *extack = mqprio->extack; in ocelot_port_mqprio()
2968 struct tc_mqprio_qopt *qopt = &mqprio->qopt; in ocelot_port_mqprio()
2969 int num_tc = qopt->num_tc; in ocelot_port_mqprio()
2982 if (qopt->count[tc] != 1) { in ocelot_port_mqprio()
2985 return -EINVAL; in ocelot_port_mqprio()
2988 err = netdev_set_tc_queue(dev, tc, 1, qopt->offset[tc]); in ocelot_port_mqprio()
2997 ocelot_port_change_fp(ocelot, port, mqprio->preemptible_tcs); in ocelot_port_mqprio()
3009 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_init_port()
3011 skb_queue_head_init(&ocelot_port->tx_skbs); in ocelot_init_port()
3078 int cpu = ocelot->num_phys_ports; in ocelot_cpu_port_init()
3112 * the number of 240-byte free memory words (aka 4-cell chunks) and not in ocelot_detect_features()
3116 ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt); in ocelot_detect_features()
3119 ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl); in ocelot_detect_features()
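
To make the unit conversion above concrete: a hypothetical SYS_MMGT_FREECNT reading of 956 free 240-byte words would correspond to 956 x 240 = 229,440 bytes of packet buffer.
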
3127 err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], in ocelot_mem_init_status()
3138 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); in ocelot_reset()
3142 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); in ocelot_reset()
3146 /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be in ocelot_reset()
3154 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); in ocelot_reset()
3158 return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); in ocelot_reset()
3167 if (ocelot->ops->reset) { in ocelot_init()
3168 ret = ocelot->ops->reset(ocelot); in ocelot_init()
3170 dev_err(ocelot->dev, "Switch reset failed\n"); in ocelot_init()
3175 mutex_init(&ocelot->mact_lock); in ocelot_init()
3176 mutex_init(&ocelot->fwd_domain_lock); in ocelot_init()
3177 spin_lock_init(&ocelot->ptp_clock_lock); in ocelot_init()
3178 spin_lock_init(&ocelot->ts_id_lock); in ocelot_init()
3179 spin_lock_init(&ocelot->inj_lock); in ocelot_init()
3180 spin_lock_init(&ocelot->xtr_lock); in ocelot_init()
3182 ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); in ocelot_init()
3183 if (!ocelot->owq) in ocelot_init()
3184 return -ENOMEM; in ocelot_init()
3190 INIT_LIST_HEAD(&ocelot->multicast); in ocelot_init()
3191 INIT_LIST_HEAD(&ocelot->pgids); in ocelot_init()
3192 INIT_LIST_HEAD(&ocelot->vlans); in ocelot_init()
3193 INIT_LIST_HEAD(&ocelot->lag_fdbs); in ocelot_init()
3200 if (ocelot->ops->psfp_init) in ocelot_init()
3201 ocelot->ops->psfp_init(ocelot); in ocelot_init()
3203 if (ocelot->mm_supported) { in ocelot_init()
3209 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_init()
3216 /* Only use S-Tag */ in ocelot_init()
3236 regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1); in ocelot_init()
3238 /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */ in ocelot_init()
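
In those units, the programmed ageing period works out to 2 s / 6.5 us ≈ 307,692.
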
3243 for (i = 0; i < ocelot->num_flooding_pgids; i++) in ocelot_init()
3254 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_init()
3267 u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); in ocelot_init()
3275 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3276 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3278 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3279 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3310 destroy_workqueue(ocelot->owq); in ocelot_init()
3318 destroy_workqueue(ocelot->owq); in ocelot_deinit()
3324 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_deinit_port()
3326 skb_queue_purge(&ocelot_port->tx_skbs); in ocelot_deinit_port()