Lines Matching +full:eee +full:- +full:pcs
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2017-2025 Microchip Technology Inc.
14 #include <linux/platform_data/microchip-ksz.h>
206 * struct ksz_drive_strength - drive strength mapping
215 /* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
223 * - for high speed signals
233 * - for low speed signals
250 /* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
270 * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
274 * a detailed explanation of EEE/LPI handling in KSZ switches.
281 * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
287 * LPI management, enabling phylink to control EEE advertisement during
290 * Hardware Management of EEE/LPI State:
291 * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
292 * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
294 * the auto-negotiation results. (Note: While the datasheet describes EEE
299 * autonomously via strapping, means MAC-level software intervention is not
300 * required or exposed for managing the LPI state once EEE is negotiated.
302 * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
306 * lack documented MAC-level LPI control.
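
Since the hardware manages LPI autonomously, the two callbacks above can be pure stubs. A minimal sketch of such dummy phylink LPI hooks follows (the signatures mirror phylink's mac_enable_tx_lpi/mac_disable_tx_lpi operations; the bodies are illustrative, not the driver's verbatim code):

static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
{
	/* Nothing to do: the switch hardware handles LPI entry/exit. */
}

static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
					 u32 timer, bool tx_clock_stop)
{
	/* Accept the request so phylink can manage EEE advertisement;
	 * no MAC-level programming is needed on this hardware.
	 */
	return 0;
}
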
449 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_select_pcs()
450 struct ksz_port *p = &dev->ports[dp->index]; in ksz_phylink_mac_select_pcs()
452 if (ksz_is_sgmii_port(dev, dp->index) && in ksz_phylink_mac_select_pcs()
455 return p->pcs; in ksz_phylink_mac_select_pcs()
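
For context, a select_pcs callback of this shape resolves the DSA port from the phylink_config and returns the per-port PCS only when the port actually uses it. A hedged sketch (the SGMII-only condition and the function name are assumptions, not the driver's exact code):

static struct phylink_pcs *
example_select_pcs(struct phylink_config *config, phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct ksz_device *dev = dp->ds->priv;
	struct ksz_port *p = &dev->ports[dp->index];

	/* Hand the port's PCS to phylink only for SGMII-capable ports. */
	if (ksz_is_sgmii_port(dev, dp->index) &&
	    interface == PHY_INTERFACE_MODE_SGMII)
		return p->pcs;

	return NULL;
}
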
1569 * port map is NOT continuous. The per-port register
2152 if (chip->chip_id == prod_num) in ksz_lookup_info()
2164 if (dev->pdata) { in ksz_check_device_id()
2165 expected_chip_id = dev->pdata->chip_id; in ksz_check_device_id()
2168 return -ENODEV; in ksz_check_device_id()
2170 expected_chip_data = of_device_get_match_data(dev->dev); in ksz_check_device_id()
2171 expected_chip_id = expected_chip_data->chip_id; in ksz_check_device_id()
2174 if (expected_chip_id != dev->chip_id) { in ksz_check_device_id()
2175 dev_err(dev->dev, in ksz_check_device_id()
2177 expected_chip_data->dev_name, dev->info->dev_name); in ksz_check_device_id()
2178 return -ENODEV; in ksz_check_device_id()
2187 struct ksz_device *dev = ds->priv; in ksz_phylink_get_caps()
2189 if (dev->info->supports_mii[port]) in ksz_phylink_get_caps()
2190 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); in ksz_phylink_get_caps()
2192 if (dev->info->supports_rmii[port]) in ksz_phylink_get_caps()
2194 config->supported_interfaces); in ksz_phylink_get_caps()
2196 if (dev->info->supports_rgmii[port]) in ksz_phylink_get_caps()
2197 phy_interface_set_rgmii(config->supported_interfaces); in ksz_phylink_get_caps()
2199 if (dev->info->internal_phy[port]) { in ksz_phylink_get_caps()
2201 config->supported_interfaces); in ksz_phylink_get_caps()
2203 * phy-mode property is absent in ksz_phylink_get_caps()
2206 config->supported_interfaces); in ksz_phylink_get_caps()
2209 if (dev->dev_ops->get_caps) in ksz_phylink_get_caps()
2210 dev->dev_ops->get_caps(dev, port, config); in ksz_phylink_get_caps()
2212 if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) { in ksz_phylink_get_caps()
2213 memcpy(config->lpi_interfaces, config->supported_interfaces, in ksz_phylink_get_caps()
2214 sizeof(config->lpi_interfaces)); in ksz_phylink_get_caps()
2216 config->lpi_capabilities = MAC_100FD; in ksz_phylink_get_caps()
2217 if (dev->info->gbit_capable[port]) in ksz_phylink_get_caps()
2218 config->lpi_capabilities |= MAC_1000FD; in ksz_phylink_get_caps()
2220 /* EEE is fully operational */ in ksz_phylink_get_caps()
2221 config->eee_enabled_default = true; in ksz_phylink_get_caps()
2233 mib = &dev->ports[port].mib; in ksz_r_mib_stats64()
2234 stats = &mib->stats64; in ksz_r_mib_stats64()
2235 pstats = &mib->pause_stats; in ksz_r_mib_stats64()
2236 raw = (struct ksz_stats_raw *)mib->counters; in ksz_r_mib_stats64()
2238 spin_lock(&mib->stats64_lock); in ksz_r_mib_stats64()
2240 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz_r_mib_stats64()
2241 raw->rx_pause; in ksz_r_mib_stats64()
2242 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz_r_mib_stats64()
2243 raw->tx_pause; in ksz_r_mib_stats64()
2248 stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2249 stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2251 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz_r_mib_stats64()
2252 raw->rx_oversize; in ksz_r_mib_stats64()
2254 stats->rx_crc_errors = raw->rx_crc_err; in ksz_r_mib_stats64()
2255 stats->rx_frame_errors = raw->rx_align_err; in ksz_r_mib_stats64()
2256 stats->rx_dropped = raw->rx_discards; in ksz_r_mib_stats64()
2257 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz_r_mib_stats64()
2258 stats->rx_frame_errors + stats->rx_dropped; in ksz_r_mib_stats64()
2260 stats->tx_window_errors = raw->tx_late_col; in ksz_r_mib_stats64()
2261 stats->tx_fifo_errors = raw->tx_discards; in ksz_r_mib_stats64()
2262 stats->tx_aborted_errors = raw->tx_exc_col; in ksz_r_mib_stats64()
2263 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz_r_mib_stats64()
2264 stats->tx_aborted_errors; in ksz_r_mib_stats64()
2266 stats->multicast = raw->rx_mcast; in ksz_r_mib_stats64()
2267 stats->collisions = raw->tx_total_col; in ksz_r_mib_stats64()
2269 pstats->tx_pause_frames = raw->tx_pause; in ksz_r_mib_stats64()
2270 pstats->rx_pause_frames = raw->rx_pause; in ksz_r_mib_stats64()
2272 spin_unlock(&mib->stats64_lock); in ksz_r_mib_stats64()
2274 if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) { in ksz_r_mib_stats64()
2275 ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col); in ksz_r_mib_stats64()
2277 dev_err(dev->dev, "Failed to monitor transmission halt\n"); in ksz_r_mib_stats64()
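
The byte totals above are corrected by ETH_FCS_LEN per frame, which implies the raw hardware counters include the 4-byte FCS. A small worked example with hypothetical numbers:

/* Hypothetical: rx_total = 10000 bytes (FCS included), rx_packets = 50.
 *
 *   rx_bytes = 10000 - 50 * ETH_FCS_LEN = 10000 - 50 * 4 = 9800 bytes
 */
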
2288 mib = &dev->ports[port].mib; in ksz88xx_r_mib_stats64()
2289 stats = &mib->stats64; in ksz88xx_r_mib_stats64()
2290 pstats = &mib->pause_stats; in ksz88xx_r_mib_stats64()
2291 raw = (struct ksz88xx_stats_raw *)mib->counters; in ksz88xx_r_mib_stats64()
2293 spin_lock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
2295 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz88xx_r_mib_stats64()
2296 raw->rx_pause; in ksz88xx_r_mib_stats64()
2297 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz88xx_r_mib_stats64()
2298 raw->tx_pause; in ksz88xx_r_mib_stats64()
2303 stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2304 stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2306 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz88xx_r_mib_stats64()
2307 raw->rx_oversize; in ksz88xx_r_mib_stats64()
2309 stats->rx_crc_errors = raw->rx_crc_err; in ksz88xx_r_mib_stats64()
2310 stats->rx_frame_errors = raw->rx_align_err; in ksz88xx_r_mib_stats64()
2311 stats->rx_dropped = raw->rx_discards; in ksz88xx_r_mib_stats64()
2312 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz88xx_r_mib_stats64()
2313 stats->rx_frame_errors + stats->rx_dropped; in ksz88xx_r_mib_stats64()
2315 stats->tx_window_errors = raw->tx_late_col; in ksz88xx_r_mib_stats64()
2316 stats->tx_fifo_errors = raw->tx_discards; in ksz88xx_r_mib_stats64()
2317 stats->tx_aborted_errors = raw->tx_exc_col; in ksz88xx_r_mib_stats64()
2318 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz88xx_r_mib_stats64()
2319 stats->tx_aborted_errors; in ksz88xx_r_mib_stats64()
2321 stats->multicast = raw->rx_mcast; in ksz88xx_r_mib_stats64()
2322 stats->collisions = raw->tx_total_col; in ksz88xx_r_mib_stats64()
2324 pstats->tx_pause_frames = raw->tx_pause; in ksz88xx_r_mib_stats64()
2325 pstats->rx_pause_frames = raw->rx_pause; in ksz88xx_r_mib_stats64()
2327 spin_unlock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
2333 struct ksz_device *dev = ds->priv; in ksz_get_stats64()
2336 mib = &dev->ports[port].mib; in ksz_get_stats64()
2338 spin_lock(&mib->stats64_lock); in ksz_get_stats64()
2339 memcpy(s, &mib->stats64, sizeof(*s)); in ksz_get_stats64()
2340 spin_unlock(&mib->stats64_lock); in ksz_get_stats64()
2346 struct ksz_device *dev = ds->priv; in ksz_get_pause_stats()
2349 mib = &dev->ports[port].mib; in ksz_get_pause_stats()
2351 spin_lock(&mib->stats64_lock); in ksz_get_pause_stats()
2352 memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats)); in ksz_get_pause_stats()
2353 spin_unlock(&mib->stats64_lock); in ksz_get_pause_stats()
2359 struct ksz_device *dev = ds->priv; in ksz_get_strings()
2365 for (i = 0; i < dev->info->mib_cnt; i++) in ksz_get_strings()
2366 ethtool_puts(&buf, dev->info->mib_names[i].string); in ksz_get_strings()
2370 * ksz_update_port_member - Adjust port forwarding rules based on STP state and
2394 struct ksz_port *p = &dev->ports[port]; in ksz_update_port_member()
2395 struct dsa_switch *ds = dev->ds; in ksz_update_port_member()
2406 for (i = 0; i < ds->num_ports; i++) { in ksz_update_port_member()
2408 struct ksz_port *other_p = &dev->ports[i]; in ksz_update_port_member()
2417 if (other_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2426 if (p->stp_state == BR_STATE_FORWARDING && in ksz_update_port_member()
2427 !(p->isolated && other_p->isolated)) { in ksz_update_port_member()
2433 for (j = 0; j < ds->num_ports; j++) { in ksz_update_port_member()
2443 third_p = &dev->ports[j]; in ksz_update_port_member()
2444 if (third_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2458 !(other_p->isolated && third_p->isolated)) in ksz_update_port_member()
2462 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); in ksz_update_port_member()
2468 if (!port_member && p->stp_state == BR_STATE_FORWARDING && in ksz_update_port_member()
2469 (dev->hsr_ports & BIT(port))) in ksz_update_port_member()
2470 port_member = dev->hsr_ports; in ksz_update_port_member()
2471 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); in ksz_update_port_member()
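
A short worked example of the membership rule implemented above, under assumed conditions (not taken from the source):

/* Hypothetical bridge: ports 1 and 2 are bridged and FORWARDING, port 3 is
 * BLOCKING, nothing is isolated and no HSR is offloaded. For port 1 the loop
 * collects only forwarding, non-isolated peers, so port_member = BIT(2) and
 * the final write is
 *
 *   dev->dev_ops->cfg_port_member(dev, 1, BIT(2) | cpu_port);
 *
 * Port 3 contributes nothing because its STP state is not FORWARDING.
 */
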
2476 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_read()
2480 ret = dev->dev_ops->r_phy(dev, addr, regnum, &val); in ksz_sw_mdio_read()
2490 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_write()
2492 return dev->dev_ops->w_phy(dev, addr, regnum, val); in ksz_sw_mdio_write()
2496 * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
2510 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_read()
2512 return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum); in ksz_parent_mdio_read()
2516 * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
2531 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_write()
2533 return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val); in ksz_parent_mdio_write()
2537 * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
2547 * Return: Port index on success, or -EINVAL if no matching port is found.
2551 struct dsa_switch *ds = dev->ds; in ksz_phy_addr_to_port()
2555 if (dev->info->internal_phy[dp->index] && in ksz_phy_addr_to_port()
2556 dev->phy_addr_map[dp->index] == addr) in ksz_phy_addr_to_port()
2557 return dp->index; in ksz_phy_addr_to_port()
2560 return -EINVAL; in ksz_phy_addr_to_port()
2564 * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
2575 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_setup()
2581 if (BIT(phy) & ds->phys_mii_mask) { in ksz_irq_phy_setup()
2588 irq = irq_find_mapping(dev->ports[port].pirq.domain, in ksz_irq_phy_setup()
2594 ds->user_mii_bus->irq[phy] = irq; in ksz_irq_phy_setup()
2599 while (phy--) in ksz_irq_phy_setup()
2600 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_setup()
2601 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_setup()
2607 * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
2615 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_free()
2619 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_free()
2620 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_free()
2624 * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
2631 * `phy-handle` properties are correctly set and that the internal PHYs match
2646 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_parse_dt_phy_config()
2647 if (!dev->info->internal_phy[dp->index]) in ksz_parse_dt_phy_config()
2650 phy_node = of_parse_phandle(dp->dn, "phy-handle", 0); in ksz_parse_dt_phy_config()
2652 dev_err(dev->dev, "failed to parse phy-handle for port %d.\n", in ksz_parse_dt_phy_config()
2653 dp->index); in ksz_parse_dt_phy_config()
2660 dev_err(dev->dev, "failed to get PHY-parent node for port %d\n", in ksz_parse_dt_phy_config()
2661 dp->index); in ksz_parse_dt_phy_config()
2664 dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n", in ksz_parse_dt_phy_config()
2665 dp->index, mdio_np, phy_parent_node); in ksz_parse_dt_phy_config()
2670 dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n", in ksz_parse_dt_phy_config()
2671 dp->index, ret); in ksz_parse_dt_phy_config()
2673 } else if (phy_addr != dev->phy_addr_map[dp->index]) { in ksz_parse_dt_phy_config()
2674 dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n", in ksz_parse_dt_phy_config()
2675 dp->index, dev->phy_addr_map[dp->index], in ksz_parse_dt_phy_config()
2679 bus->phy_mask |= BIT(phy_addr); in ksz_parse_dt_phy_config()
2688 return -EINVAL; in ksz_parse_dt_phy_config()
2694 * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
2700 * "mdio-parent-bus" device tree property to directly manage internal PHYs.
2709 struct dsa_switch *ds = dev->ds; in ksz_mdio_register()
2714 mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio"); in ksz_mdio_register()
2718 parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0); in ksz_mdio_register()
2719 if (parent_bus_node && !dev->info->phy_side_mdio_supported) { in ksz_mdio_register()
2720 		dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property\n"); in ksz_mdio_register()
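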
2721 ret = -EINVAL; in ksz_mdio_register()
2727 ret = -EPROBE_DEFER; in ksz_mdio_register()
2732 dev->parent_mdio_bus = parent_bus; in ksz_mdio_register()
2735 bus = devm_mdiobus_alloc(ds->dev); in ksz_mdio_register()
2737 ret = -ENOMEM; in ksz_mdio_register()
2741 if (dev->dev_ops->mdio_bus_preinit) { in ksz_mdio_register()
2742 ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus); in ksz_mdio_register()
2747 if (dev->dev_ops->create_phy_addr_map) { in ksz_mdio_register()
2748 ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus); in ksz_mdio_register()
2752 for (i = 0; i < dev->info->port_cnt; i++) in ksz_mdio_register()
2753 dev->phy_addr_map[i] = i; in ksz_mdio_register()
2756 bus->priv = dev; in ksz_mdio_register()
2758 bus->read = ksz_parent_mdio_read; in ksz_mdio_register()
2759 bus->write = ksz_parent_mdio_write; in ksz_mdio_register()
2760 bus->name = "KSZ side MDIO"; in ksz_mdio_register()
2761 snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d", in ksz_mdio_register()
2762 ds->index); in ksz_mdio_register()
2764 bus->read = ksz_sw_mdio_read; in ksz_mdio_register()
2765 bus->write = ksz_sw_mdio_write; in ksz_mdio_register()
2766 bus->name = "ksz user smi"; in ksz_mdio_register()
2767 if (ds->dst->index != 0) { in ksz_mdio_register()
2768 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index); in ksz_mdio_register()
2770 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); in ksz_mdio_register()
2778 ds->phys_mii_mask = bus->phy_mask; in ksz_mdio_register()
2779 bus->parent = ds->dev; in ksz_mdio_register()
2781 ds->user_mii_bus = bus; in ksz_mdio_register()
2783 if (dev->irq > 0) { in ksz_mdio_register()
2789 ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); in ksz_mdio_register()
2791 dev_err(ds->dev, "unable to register MDIO bus %s\n", in ksz_mdio_register()
2792 bus->id); in ksz_mdio_register()
2793 if (dev->irq > 0) in ksz_mdio_register()
2808 kirq->masked |= BIT(d->hwirq); in ksz_irq_mask()
2815 kirq->masked &= ~BIT(d->hwirq); in ksz_irq_unmask()
2822 mutex_lock(&kirq->dev->lock_irq); in ksz_irq_bus_lock()
2828 struct ksz_device *dev = kirq->dev; in ksz_irq_bus_sync_unlock()
2831 ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); in ksz_irq_bus_sync_unlock()
2833 dev_err(dev->dev, "failed to change IRQ mask\n"); in ksz_irq_bus_sync_unlock()
2835 mutex_unlock(&dev->lock_irq); in ksz_irq_bus_sync_unlock()
2839 .name = "ksz-irq",
2849 irq_set_chip_data(irq, d->host_data); in ksz_irq_domain_map()
2865 free_irq(kirq->irq_num, kirq); in ksz_irq_free()
2867 for (irq = 0; irq < kirq->nirqs; irq++) { in ksz_irq_free()
2868 virq = irq_find_mapping(kirq->domain, irq); in ksz_irq_free()
2872 irq_domain_remove(kirq->domain); in ksz_irq_free()
2885 dev = kirq->dev; in ksz_irq_thread_fn()
2888 ret = ksz_read8(dev, kirq->reg_status, &data); in ksz_irq_thread_fn()
2892 for (n = 0; n < kirq->nirqs; ++n) { in ksz_irq_thread_fn()
2894 sub_irq = irq_find_mapping(kirq->domain, n); in ksz_irq_thread_fn()
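
The thread function above follows the usual status-register demux pattern: read one summary register, then fire a nested interrupt per set bit. A self-contained sketch of that pattern (the filled-in parts are assumptions, not the driver's verbatim code):

static irqreturn_t example_demux_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *kirq = dev_id;
	unsigned int sub_irq;
	u8 data;
	int n;

	/* One status register summarises all sub-interrupt sources. */
	if (ksz_read8(kirq->dev, kirq->reg_status, &data))
		return IRQ_NONE;

	/* Dispatch one nested interrupt per set status bit. */
	for (n = 0; n < kirq->nirqs; n++) {
		if (data & BIT(n)) {
			sub_irq = irq_find_mapping(kirq->domain, n);
			handle_nested_irq(sub_irq);
		}
	}

	return IRQ_HANDLED;
}
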
2907 kirq->dev = dev; in ksz_irq_common_setup()
2908 kirq->masked = ~0; in ksz_irq_common_setup()
2910 kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0, in ksz_irq_common_setup()
2912 if (!kirq->domain) in ksz_irq_common_setup()
2913 return -ENOMEM; in ksz_irq_common_setup()
2915 for (n = 0; n < kirq->nirqs; n++) in ksz_irq_common_setup()
2916 irq_create_mapping(kirq->domain, n); in ksz_irq_common_setup()
2918 ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn, in ksz_irq_common_setup()
2919 IRQF_ONESHOT, kirq->name, kirq); in ksz_irq_common_setup()
2933 struct ksz_irq *girq = &dev->girq; in ksz_girq_setup()
2935 girq->nirqs = dev->info->port_cnt; in ksz_girq_setup()
2936 girq->reg_mask = REG_SW_PORT_INT_MASK__1; in ksz_girq_setup()
2937 girq->reg_status = REG_SW_PORT_INT_STATUS__1; in ksz_girq_setup()
2938 snprintf(girq->name, sizeof(girq->name), "global_port_irq"); in ksz_girq_setup()
2940 girq->irq_num = dev->irq; in ksz_girq_setup()
2947 struct ksz_irq *pirq = &dev->ports[p].pirq; in ksz_pirq_setup()
2949 pirq->nirqs = dev->info->port_nirqs; in ksz_pirq_setup()
2950 pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK); in ksz_pirq_setup()
2951 pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS); in ksz_pirq_setup()
2952 snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p); in ksz_pirq_setup()
2954 pirq->irq_num = irq_find_mapping(dev->girq.domain, p); in ksz_pirq_setup()
2955 if (pirq->irq_num < 0) in ksz_pirq_setup()
2956 return pirq->irq_num; in ksz_pirq_setup()
2965 struct ksz_device *dev = ds->priv; in ksz_setup()
2972 regs = dev->info->regs; in ksz_setup()
2974 dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), in ksz_setup()
2975 dev->info->num_vlans, GFP_KERNEL); in ksz_setup()
2976 if (!dev->vlan_cache) in ksz_setup()
2977 return -ENOMEM; in ksz_setup()
2979 ret = dev->dev_ops->reset(dev); in ksz_setup()
2981 dev_err(ds->dev, "failed to reset switch\n"); in ksz_setup()
2989 if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) { in ksz_setup()
2990 ret = dev->dev_ops->pcs_create(dev); in ksz_setup()
3005 dev->dev_ops->config_cpu_port(ds); in ksz_setup()
3007 dev->dev_ops->enable_stp_addr(dev); in ksz_setup()
3009 ds->num_tx_queues = dev->info->num_tx_queues; in ksz_setup()
3016 ds->configure_vlan_while_not_filtering = false; in ksz_setup()
3017 ds->dscp_prio_mapping_is_global = true; in ksz_setup()
3019 if (dev->dev_ops->setup) { in ksz_setup()
3020 ret = dev->dev_ops->setup(ds); in ksz_setup()
3030 p = &dev->ports[dev->cpu_port]; in ksz_setup()
3031 p->learning = true; in ksz_setup()
3033 if (dev->irq > 0) { in ksz_setup()
3038 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_setup()
3039 ret = ksz_pirq_setup(dev, dp->index); in ksz_setup()
3043 if (dev->info->ptp_capable) { in ksz_setup()
3044 ret = ksz_ptp_irq_setup(ds, dp->index); in ksz_setup()
3051 if (dev->info->ptp_capable) { in ksz_setup()
3054 dev_err(dev->dev, "Failed to register PTP clock: %d\n", in ksz_setup()
3062 dev_err(dev->dev, "failed to register the mdio"); in ksz_setup()
3077 if (dev->info->ptp_capable) in ksz_setup()
3080 if (dev->irq > 0 && dev->info->ptp_capable) in ksz_setup()
3081 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
3082 ksz_ptp_irq_free(ds, dp->index); in ksz_setup()
3084 if (dev->irq > 0) in ksz_setup()
3085 dsa_switch_for_each_user_port(dp, dev->ds) in ksz_setup()
3086 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_setup()
3088 if (dev->irq > 0) in ksz_setup()
3089 ksz_irq_free(&dev->girq); in ksz_setup()
3096 struct ksz_device *dev = ds->priv; in ksz_teardown()
3099 if (dev->info->ptp_capable) in ksz_teardown()
3102 if (dev->irq > 0) { in ksz_teardown()
3103 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_teardown()
3104 if (dev->info->ptp_capable) in ksz_teardown()
3105 ksz_ptp_irq_free(ds, dp->index); in ksz_teardown()
3107 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_teardown()
3110 ksz_irq_free(&dev->girq); in ksz_teardown()
3113 if (dev->dev_ops->teardown) in ksz_teardown()
3114 dev->dev_ops->teardown(ds); in ksz_teardown()
3119 struct ksz_port_mib *mib = &dev->ports[port].mib; in port_r_cnt()
3123 while (mib->cnt_ptr < dev->info->reg_mib_cnt) { in port_r_cnt()
3124 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, in port_r_cnt()
3125 &mib->counters[mib->cnt_ptr]); in port_r_cnt()
3126 ++mib->cnt_ptr; in port_r_cnt()
3130 dropped = &mib->counters[dev->info->mib_cnt]; in port_r_cnt()
3133 while (mib->cnt_ptr < dev->info->mib_cnt) { in port_r_cnt()
3134 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, in port_r_cnt()
3135 dropped, &mib->counters[mib->cnt_ptr]); in port_r_cnt()
3136 ++mib->cnt_ptr; in port_r_cnt()
3138 mib->cnt_ptr = 0; in port_r_cnt()
3149 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_mib_read_work()
3150 if (dsa_is_unused_port(dev->ds, i)) in ksz_mib_read_work()
3153 p = &dev->ports[i]; in ksz_mib_read_work()
3154 mib = &p->mib; in ksz_mib_read_work()
3155 mutex_lock(&mib->cnt_mutex); in ksz_mib_read_work()
3160 if (!p->read) { in ksz_mib_read_work()
3161 const struct dsa_port *dp = dsa_to_port(dev->ds, i); in ksz_mib_read_work()
3163 if (!netif_carrier_ok(dp->user)) in ksz_mib_read_work()
3164 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_mib_read_work()
3167 p->read = false; in ksz_mib_read_work()
3169 if (dev->dev_ops->r_mib_stat64) in ksz_mib_read_work()
3170 dev->dev_ops->r_mib_stat64(dev, i); in ksz_mib_read_work()
3172 mutex_unlock(&mib->cnt_mutex); in ksz_mib_read_work()
3175 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_mib_read_work()
3182 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); in ksz_init_mib_timer()
3184 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_init_mib_timer()
3185 struct ksz_port_mib *mib = &dev->ports[i].mib; in ksz_init_mib_timer()
3187 dev->dev_ops->port_init_cnt(dev, i); in ksz_init_mib_timer()
3189 mib->cnt_ptr = 0; in ksz_init_mib_timer()
3190 memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); in ksz_init_mib_timer()
3196 struct ksz_device *dev = ds->priv; in ksz_phy_read16()
3200 ret = dev->dev_ops->r_phy(dev, addr, reg, &val); in ksz_phy_read16()
3209 struct ksz_device *dev = ds->priv; in ksz_phy_write16()
3212 ret = dev->dev_ops->w_phy(dev, addr, reg, val); in ksz_phy_write16()
3221 struct ksz_device *dev = ds->priv; in ksz_get_phy_flags()
3223 switch (dev->chip_id) { in ksz_get_phy_flags()
3226 * Port 1 does not work with LinkMD Cable-Testing. in ksz_get_phy_flags()
3242 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_link_down()
3245 dev->ports[dp->index].read = true; in ksz_phylink_mac_link_down()
3247 if (dev->mib_read_interval) in ksz_phylink_mac_link_down()
3248 schedule_delayed_work(&dev->mib_read, 0); in ksz_phylink_mac_link_down()
3253 struct ksz_device *dev = ds->priv; in ksz_sset_count()
3258 return dev->info->mib_cnt; in ksz_sset_count()
3265 struct ksz_device *dev = ds->priv; in ksz_get_ethtool_stats()
3268 mib = &dev->ports[port].mib; in ksz_get_ethtool_stats()
3269 mutex_lock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3272 if (!netif_carrier_ok(dp->user)) in ksz_get_ethtool_stats()
3273 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_get_ethtool_stats()
3275 memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); in ksz_get_ethtool_stats()
3276 mutex_unlock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3301 struct ksz_device *dev = ds->priv; in ksz_port_fast_age()
3303 dev->dev_ops->flush_dyn_mac_table(dev, port); in ksz_port_fast_age()
3308 struct ksz_device *dev = ds->priv; in ksz_set_ageing_time()
3310 if (!dev->dev_ops->set_ageing_time) in ksz_set_ageing_time()
3311 return -EOPNOTSUPP; in ksz_set_ageing_time()
3313 return dev->dev_ops->set_ageing_time(dev, msecs); in ksz_set_ageing_time()
3320 struct ksz_device *dev = ds->priv; in ksz_port_fdb_add()
3322 if (!dev->dev_ops->fdb_add) in ksz_port_fdb_add()
3323 return -EOPNOTSUPP; in ksz_port_fdb_add()
3325 return dev->dev_ops->fdb_add(dev, port, addr, vid, db); in ksz_port_fdb_add()
3332 struct ksz_device *dev = ds->priv; in ksz_port_fdb_del()
3334 if (!dev->dev_ops->fdb_del) in ksz_port_fdb_del()
3335 return -EOPNOTSUPP; in ksz_port_fdb_del()
3337 return dev->dev_ops->fdb_del(dev, port, addr, vid, db); in ksz_port_fdb_del()
3343 struct ksz_device *dev = ds->priv; in ksz_port_fdb_dump()
3345 if (!dev->dev_ops->fdb_dump) in ksz_port_fdb_dump()
3346 return -EOPNOTSUPP; in ksz_port_fdb_dump()
3348 return dev->dev_ops->fdb_dump(dev, port, cb, data); in ksz_port_fdb_dump()
3355 struct ksz_device *dev = ds->priv; in ksz_port_mdb_add()
3357 if (!dev->dev_ops->mdb_add) in ksz_port_mdb_add()
3358 return -EOPNOTSUPP; in ksz_port_mdb_add()
3360 return dev->dev_ops->mdb_add(dev, port, mdb, db); in ksz_port_mdb_add()
3367 struct ksz_device *dev = ds->priv; in ksz_port_mdb_del()
3369 if (!dev->dev_ops->mdb_del) in ksz_port_mdb_del()
3370 return -EOPNOTSUPP; in ksz_port_mdb_del()
3372 return dev->dev_ops->mdb_del(dev, port, mdb, db); in ksz_port_mdb_del()
3381 for (ipm = 0; ipm < dev->info->num_ipms; ipm++) { in ksz9477_set_default_prio_queue_mapping()
3388 queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues); in ksz9477_set_default_prio_queue_mapping()
3400 struct ksz_device *dev = ds->priv; in ksz_port_setup()
3407 dev->dev_ops->port_setup(dev, port, false); in ksz_port_setup()
3424 struct ksz_device *dev = ds->priv; in ksz_port_stp_state_set()
3429 regs = dev->info->regs; in ksz_port_stp_state_set()
3434 p = &dev->ports[port]; in ksz_port_stp_state_set()
3445 if (!p->learning) in ksz_port_stp_state_set()
3450 if (!p->learning) in ksz_port_stp_state_set()
3457 dev_err(ds->dev, "invalid STP state: %d\n", state); in ksz_port_stp_state_set()
3463 p->stp_state = state; in ksz_port_stp_state_set()
3470 struct ksz_device *dev = ds->priv; in ksz_port_teardown()
3472 switch (dev->chip_id) { in ksz_port_teardown()
3492 return -EINVAL; in ksz_port_pre_bridge_flags()
3501 struct ksz_device *dev = ds->priv; in ksz_port_bridge_flags()
3502 struct ksz_port *p = &dev->ports[port]; in ksz_port_bridge_flags()
3506 p->learning = !!(flags.val & BR_LEARNING); in ksz_port_bridge_flags()
3509 p->isolated = !!(flags.val & BR_ISOLATED); in ksz_port_bridge_flags()
3512 ksz_port_stp_state_set(ds, port, p->stp_state); in ksz_port_bridge_flags()
3522 struct ksz_device *dev = ds->priv; in ksz_get_tag_protocol()
3528 if (dev->chip_id == KSZ88X3_CHIP_ID || in ksz_get_tag_protocol()
3529 dev->chip_id == KSZ8463_CHIP_ID || in ksz_get_tag_protocol()
3530 dev->chip_id == KSZ8563_CHIP_ID || in ksz_get_tag_protocol()
3531 dev->chip_id == KSZ9893_CHIP_ID || in ksz_get_tag_protocol()
3532 dev->chip_id == KSZ9563_CHIP_ID) in ksz_get_tag_protocol()
3535 if (dev->chip_id == KSZ8567_CHIP_ID || in ksz_get_tag_protocol()
3536 dev->chip_id == KSZ9477_CHIP_ID || in ksz_get_tag_protocol()
3537 dev->chip_id == KSZ9896_CHIP_ID || in ksz_get_tag_protocol()
3538 dev->chip_id == KSZ9897_CHIP_ID || in ksz_get_tag_protocol()
3539 dev->chip_id == KSZ9567_CHIP_ID || in ksz_get_tag_protocol()
3540 dev->chip_id == LAN9646_CHIP_ID) in ksz_get_tag_protocol()
3561 tagger_data->xmit_work_fn = ksz_port_deferred_xmit; in ksz_connect_tag_protocol()
3564 return -EPROTONOSUPPORT; in ksz_connect_tag_protocol()
3571 struct ksz_device *dev = ds->priv; in ksz_port_vlan_filtering()
3573 if (!dev->dev_ops->vlan_filtering) in ksz_port_vlan_filtering()
3574 return -EOPNOTSUPP; in ksz_port_vlan_filtering()
3576 return dev->dev_ops->vlan_filtering(dev, port, flag, extack); in ksz_port_vlan_filtering()
3583 struct ksz_device *dev = ds->priv; in ksz_port_vlan_add()
3585 if (!dev->dev_ops->vlan_add) in ksz_port_vlan_add()
3586 return -EOPNOTSUPP; in ksz_port_vlan_add()
3588 return dev->dev_ops->vlan_add(dev, port, vlan, extack); in ksz_port_vlan_add()
3594 struct ksz_device *dev = ds->priv; in ksz_port_vlan_del()
3596 if (!dev->dev_ops->vlan_del) in ksz_port_vlan_del()
3597 return -EOPNOTSUPP; in ksz_port_vlan_del()
3599 return dev->dev_ops->vlan_del(dev, port, vlan); in ksz_port_vlan_del()
3606 struct ksz_device *dev = ds->priv; in ksz_port_mirror_add()
3608 if (!dev->dev_ops->mirror_add) in ksz_port_mirror_add()
3609 return -EOPNOTSUPP; in ksz_port_mirror_add()
3611 return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack); in ksz_port_mirror_add()
3617 struct ksz_device *dev = ds->priv; in ksz_port_mirror_del()
3619 if (dev->dev_ops->mirror_del) in ksz_port_mirror_del()
3620 dev->dev_ops->mirror_del(dev, port, mirror); in ksz_port_mirror_del()
3625 struct ksz_device *dev = ds->priv; in ksz_change_mtu()
3627 if (!dev->dev_ops->change_mtu) in ksz_change_mtu()
3628 return -EOPNOTSUPP; in ksz_change_mtu()
3630 return dev->dev_ops->change_mtu(dev, port, mtu); in ksz_change_mtu()
3635 struct ksz_device *dev = ds->priv; in ksz_max_mtu()
3637 switch (dev->chip_id) { in ksz_max_mtu()
3641 return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3646 return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3661 return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3664 return -EOPNOTSUPP; in ksz_max_mtu()
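
For a feel of the numbers in the last case, assuming KSZ9477_MAX_FRAME_SIZE is 9000 bytes (an assumption; check the register header), with VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:

/*   max MTU = 9000 - 18 - 4 = 8978 bytes   */
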
3668 * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
3673 * This function also documents devices where EEE was initially advertised but
3679 * operational EEE, false otherwise.
3683 struct ksz_device *dev = ds->priv; in ksz_support_eee()
3685 if (!dev->info->internal_phy[port]) in ksz_support_eee()
3688 switch (dev->chip_id) { in ksz_support_eee()
3708 /* Energy Efficient Ethernet (EEE) feature select must be in ksz_support_eee()
3710 * The EEE feature is enabled by default, but it is not fully in ksz_support_eee()
3712 * controls. If not disabled, the PHY ports can auto-negotiate in ksz_support_eee()
3713 * to enable EEE, and this feature can cause link drops when in ksz_support_eee()
3714 * linked to another device supporting EEE. in ksz_support_eee()
3727 struct ksz_device *dev = ds->priv; in ksz_set_mac_eee()
3729 if (!e->tx_lpi_enabled) { in ksz_set_mac_eee()
3730 dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); in ksz_set_mac_eee()
3731 return -EINVAL; in ksz_set_mac_eee()
3734 if (e->tx_lpi_timer) { in ksz_set_mac_eee()
3735 dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n"); in ksz_set_mac_eee()
3736 return -EINVAL; in ksz_set_mac_eee()
3745 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_xmii()
3746 struct ksz_port *p = &dev->ports[port]; in ksz_set_xmii()
3747 const u16 *regs = dev->info->regs; in ksz_set_xmii()
3770 /* On KSZ9893, disable RGMII in-band status support */ in ksz_set_xmii()
3771 if (dev->chip_id == KSZ9893_CHIP_ID || in ksz_set_xmii()
3772 dev->chip_id == KSZ8563_CHIP_ID || in ksz_set_xmii()
3773 dev->chip_id == KSZ9563_CHIP_ID || in ksz_set_xmii()
3778 dev_err(dev->dev, "Unsupported interface '%s' for port %d\n", in ksz_set_xmii()
3783 if (p->rgmii_tx_val) in ksz_set_xmii()
3786 if (p->rgmii_rx_val) in ksz_set_xmii()
3795 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_xmii()
3796 const u16 *regs = dev->info->regs; in ksz_get_xmii()
3831 struct ksz_device *dev = dp->ds->priv; in ksz88x3_phylink_mac_config()
3833 dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN); in ksz88x3_phylink_mac_config()
3841 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_config()
3842 int port = dp->index; in ksz_phylink_mac_config()
3845 if (dev->info->internal_phy[port]) in ksz_phylink_mac_config()
3853 dev_err(dev->dev, "In-band AN not supported!\n"); in ksz_phylink_mac_config()
3857 ksz_set_xmii(dev, port, state->interface); in ksz_phylink_mac_config()
3859 if (dev->dev_ops->setup_rgmii_delay) in ksz_phylink_mac_config()
3860 dev->dev_ops->setup_rgmii_delay(dev, port); in ksz_phylink_mac_config()
3865 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_gbit()
3866 const u16 *regs = dev->info->regs; in ksz_get_gbit()
3883 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_gbit()
3884 const u16 *regs = dev->info->regs; in ksz_set_gbit()
3902 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_set_100_10mbit()
3903 const u16 *regs = dev->info->regs; in ksz_set_100_10mbit()
3933 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_duplex_flowctrl()
3934 const u32 *masks = dev->info->masks; in ksz_duplex_flowctrl()
3935 const u16 *regs = dev->info->regs; in ksz_duplex_flowctrl()
3964 struct ksz_device *dev = dp->ds->priv; in ksz9477_phylink_mac_link_up()
3965 int port = dp->index; in ksz9477_phylink_mac_link_up()
3968 p = &dev->ports[port]; in ksz9477_phylink_mac_link_up()
3971 if (dev->info->internal_phy[port]) in ksz9477_phylink_mac_link_up()
3974 p->phydev.speed = speed; in ksz9477_phylink_mac_link_up()
3998 dev->chip_id = KSZ8463_CHIP_ID; in ksz_switch_detect()
4004 dev->chip_id = KSZ8795_CHIP_ID; in ksz_switch_detect()
4008 dev->chip_id = KSZ8765_CHIP_ID; in ksz_switch_detect()
4010 dev->chip_id = KSZ8794_CHIP_ID; in ksz_switch_detect()
4012 return -ENODEV; in ksz_switch_detect()
4017 dev->chip_id = KSZ88X3_CHIP_ID; in ksz_switch_detect()
4019 return -ENODEV; in ksz_switch_detect()
4024 dev->chip_id = KSZ8895_CHIP_ID; in ksz_switch_detect()
4026 return -ENODEV; in ksz_switch_detect()
4031 dev->chip_id = KSZ8864_CHIP_ID; in ksz_switch_detect()
4038 dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32); in ksz_switch_detect()
4054 if (dev->chip_id != LAN9646_CHIP_ID) in ksz_switch_detect()
4055 dev->chip_id = id32; in ksz_switch_detect()
4064 dev->chip_id = KSZ8563_CHIP_ID; in ksz_switch_detect()
4066 dev->chip_id = KSZ9563_CHIP_ID; in ksz_switch_detect()
4068 dev->chip_id = KSZ9893_CHIP_ID; in ksz_switch_detect()
4072 dev_err(dev->dev, in ksz_switch_detect()
4074 return -ENODEV; in ksz_switch_detect()
4083 struct ksz_device *dev = ds->priv; in ksz_cls_flower_add()
4085 switch (dev->chip_id) { in ksz_cls_flower_add()
4098 return -EOPNOTSUPP; in ksz_cls_flower_add()
4104 struct ksz_device *dev = ds->priv; in ksz_cls_flower_del()
4106 switch (dev->chip_id) { in ksz_cls_flower_del()
4119 return -EOPNOTSUPP; in ksz_cls_flower_del()
4123 * is converted to hexadecimal using the successive multiplication method. On
4134 txrate = idle_slope - send_slope; in cinc_cal()
4137 return -EINVAL; in cinc_cal()
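
The "successive multiplication" named in the comment above turns the bandwidth fraction idle_slope / (idle_slope - send_slope) into hex digits one at a time: multiply the remainder by 16, peel off the integer part as the next digit, and carry the rest forward. An illustrative sketch of the method (not the driver's exact cinc_cal code):

static u32 example_fraction_to_hex(u32 idle_slope, s32 send_slope, int digits)
{
	u32 txrate = idle_slope - send_slope;	/* send_slope is negative */
	u32 rem = idle_slope % txrate;
	u32 out = 0;
	int i;

	for (i = 0; i < digits; i++) {
		rem *= 16;				/* shift in one hex digit */
		out = (out << 4) | (rem / txrate);	/* integer part = digit */
		rem %= txrate;				/* carry remainder forward */
	}

	return out;
}

For example, idle_slope = 30000 and send_slope = -70000 give txrate = 100000 (a 30% fraction); three digits yield 0x4CC, i.e. 0.3 ≈ 0x0.4CC.
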
4168 struct ksz_device *dev = ds->priv; in ksz_setup_tc_cbs()
4172 if (!dev->info->tc_cbs_supported) in ksz_setup_tc_cbs()
4173 return -EOPNOTSUPP; in ksz_setup_tc_cbs()
4175 if (qopt->queue > dev->info->num_tx_queues) in ksz_setup_tc_cbs()
4176 return -EINVAL; in ksz_setup_tc_cbs()
4179 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue); in ksz_setup_tc_cbs()
4183 if (!qopt->enable) in ksz_setup_tc_cbs()
4189 qopt->hicredit); in ksz_setup_tc_cbs()
4195 qopt->locredit); in ksz_setup_tc_cbs()
4200 ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw); in ksz_setup_tc_cbs()
4204 if (dev->dev_ops->tc_cbs_set_cinc) { in ksz_setup_tc_cbs()
4205 ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw); in ksz_setup_tc_cbs()
4221 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_disable_egress_rate_limit()
4238 return p->bands - 1 - band; in ksz_ets_band_to_queue()
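
The mapping above simply reverses band order into queue order, e.g. with p->bands = 4:

/* band 0 -> queue 3, band 1 -> queue 2, band 2 -> queue 1, band 3 -> queue 0 */
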
4246 reg += ((3 - queue) / 2) * 2; in ksz8463_tc_ctrl()
4248 reg -= (queue & 1); in ksz8463_tc_ctrl()
4253 * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
4261 * - No configurable queue-to-priority mapping
4262 * - No weight adjustment in WFQ mode
4279 for (band = 0; band < p->bands; band++) { in ksz88x3_tc_ets_add()
4300 * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
4317 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz88x3_tc_ets_del()
4385 for (band = 0; band < p->bands; band++) { in ksz_tc_ets_add()
4397 for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) { in ksz_tc_ets_add()
4400 if (tc_prio >= dev->info->num_ipms) in ksz_tc_ets_add()
4403 queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]); in ksz_tc_ets_add()
4417 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_tc_ets_del()
4425 /* Revert the queue mapping for TC-priority to its default setting on in ksz_tc_ets_del()
4439 if (p->bands != dev->info->num_tx_queues) { in ksz_tc_ets_validate()
4440 dev_err(dev->dev, "Not supported amount of bands. It should be %d\n", in ksz_tc_ets_validate()
4441 dev->info->num_tx_queues); in ksz_tc_ets_validate()
4442 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4445 for (band = 0; band < p->bands; ++band) { in ksz_tc_ets_validate()
4458 if (p->quanta[band]) { in ksz_tc_ets_validate()
4459 dev_err(dev->dev, "Quanta/weights configuration is not supported.\n"); in ksz_tc_ets_validate()
4460 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4470 struct ksz_device *dev = ds->priv; in ksz_tc_setup_qdisc_ets()
4474 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4476 if (qopt->parent != TC_H_ROOT) { in ksz_tc_setup_qdisc_ets()
4477 dev_err(dev->dev, "Parent should be \"root\"\n"); in ksz_tc_setup_qdisc_ets()
4478 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4481 switch (qopt->command) { in ksz_tc_setup_qdisc_ets()
4483 ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4489 &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4491 return ksz_tc_ets_add(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4499 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4502 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4514 return -EOPNOTSUPP; in ksz_setup_tc()
4519 * ksz_handle_wake_reason - Handle wake reason on a specified port.
4533 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_handle_wake_reason()
4534 const u16 *regs = dev->info->regs; in ksz_handle_wake_reason()
4538 ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4546 dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, in ksz_handle_wake_reason()
4551 return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4556 * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
4559 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4568 struct ksz_device *dev = ds->priv; in ksz_get_wol()
4569 const u16 *regs = dev->info->regs; in ksz_get_wol()
4576 if (!dev->wakeup_source) in ksz_get_wol()
4579 wol->supported = WAKE_PHY; in ksz_get_wol()
4585 if (ksz_is_port_mac_global_usable(dev->ds, port)) in ksz_get_wol()
4586 wol->supported |= WAKE_MAGIC; in ksz_get_wol()
4588 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_get_wol()
4594 wol->wolopts |= WAKE_MAGIC; in ksz_get_wol()
4596 wol->wolopts |= WAKE_PHY; in ksz_get_wol()
4600 * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
4603 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4605 * This function configures Wake-on-LAN (WoL) settings for a specified
4617 struct ksz_device *dev = ds->priv; in ksz_set_wol()
4618 const u16 *regs = dev->info->regs; in ksz_set_wol()
4623 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) in ksz_set_wol()
4624 return -EINVAL; in ksz_set_wol()
4627 return -EOPNOTSUPP; in ksz_set_wol()
4629 if (!dev->wakeup_source) in ksz_set_wol()
4630 return -EOPNOTSUPP; in ksz_set_wol()
4636 if (wol->wolopts & WAKE_MAGIC) in ksz_set_wol()
4638 if (wol->wolopts & WAKE_PHY) in ksz_set_wol()
4641 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4658 ret = ksz_switch_macaddr_get(dev->ds, port, NULL); in ksz_set_wol()
4662 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4665 ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4669 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4677 * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
4678 * considering Wake-on-LAN (WoL) settings.
4684 * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
4690 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_wol_pre_shutdown()
4691 const u16 *regs = dev->info->regs; in ksz_wol_pre_shutdown()
4701 if (!dev->wakeup_source) in ksz_wol_pre_shutdown()
4704 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_wol_pre_shutdown()
4707 ret = ops->pme_pread8(dev, dp->index, in ksz_wol_pre_shutdown()
4715 ksz_handle_wake_reason(dev, dp->index); in ksz_wol_pre_shutdown()
4720 if (dev->pme_active_high) in ksz_wol_pre_shutdown()
4722 ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en); in ksz_wol_pre_shutdown()
4734 if (dp->hsr_dev) { in ksz_port_set_mac_address()
4735 dev_err(ds->dev, in ksz_port_set_mac_address()
4738 return -EBUSY; in ksz_port_set_mac_address()
4746 ksz_get_wol(ds, dp->index, &wol); in ksz_port_set_mac_address()
4748 dev_err(ds->dev, in ksz_port_set_mac_address()
4751 return -EBUSY; in ksz_port_set_mac_address()
4758 * ksz_is_port_mac_global_usable - Check if the MAC address on a given port
4771 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_is_port_mac_global_usable()
4772 const unsigned char *addr = user->dev_addr; in ksz_is_port_mac_global_usable()
4774 struct ksz_device *dev = ds->priv; in ksz_is_port_mac_global_usable()
4778 switch_macaddr = dev->switch_macaddr; in ksz_is_port_mac_global_usable()
4779 if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr)) in ksz_is_port_mac_global_usable()
4786 * ksz_switch_macaddr_get - Program the switch's MAC address register.
4793 * multiple features like HSR self-address filtering and WoL. Other user ports
4803 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_switch_macaddr_get()
4804 const unsigned char *addr = user->dev_addr; in ksz_switch_macaddr_get()
4806 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_get()
4807 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_get()
4813 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_get()
4815 if (!ether_addr_equal(switch_macaddr->addr, addr)) { in ksz_switch_macaddr_get()
4818 switch_macaddr->addr); in ksz_switch_macaddr_get()
4819 return -EBUSY; in ksz_switch_macaddr_get()
4822 refcount_inc(&switch_macaddr->refcount); in ksz_switch_macaddr_get()
4828 return -ENOMEM; in ksz_switch_macaddr_get()
4830 ether_addr_copy(switch_macaddr->addr, addr); in ksz_switch_macaddr_get()
4831 refcount_set(&switch_macaddr->refcount, 1); in ksz_switch_macaddr_get()
4832 dev->switch_macaddr = switch_macaddr; in ksz_switch_macaddr_get()
4853 dev->switch_macaddr = NULL; in ksz_switch_macaddr_get()
4854 refcount_set(&switch_macaddr->refcount, 0); in ksz_switch_macaddr_get()
4863 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_put()
4864 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_put()
4870 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_put()
4871 if (!refcount_dec_and_test(&switch_macaddr->refcount)) in ksz_switch_macaddr_put()
4877 dev->switch_macaddr = NULL; in ksz_switch_macaddr_put()
4884 struct ksz_device *dev = ds->priv; in ksz_hsr_join()
4892 if (dev->chip_id != KSZ9477_CHIP_ID) { in ksz_hsr_join()
4894 return -EOPNOTSUPP; in ksz_hsr_join()
4898 if (dev->hsr_dev && hsr != dev->hsr_dev) { in ksz_hsr_join()
4900 return -EOPNOTSUPP; in ksz_hsr_join()
4906 return -EOPNOTSUPP; in ksz_hsr_join()
4910 if (hweight8(dev->hsr_ports) >= 2) { in ksz_hsr_join()
4912 "Cannot offload more than two ports - using software HSR"); in ksz_hsr_join()
4913 return -EOPNOTSUPP; in ksz_hsr_join()
4924 dev->hsr_dev = hsr; in ksz_hsr_join()
4925 dev->hsr_ports |= BIT(port); in ksz_hsr_join()
4933 struct ksz_device *dev = ds->priv; in ksz_hsr_leave()
4935 WARN_ON(dev->chip_id != KSZ9477_CHIP_ID); in ksz_hsr_leave()
4938 dev->hsr_ports &= ~BIT(port); in ksz_hsr_leave()
4939 if (!dev->hsr_ports) in ksz_hsr_leave()
4940 dev->hsr_dev = NULL; in ksz_hsr_leave()
4949 struct ksz_device *dev = ds->priv; in ksz_suspend()
4951 cancel_delayed_work_sync(&dev->mib_read); in ksz_suspend()
4957 struct ksz_device *dev = ds->priv; in ksz_resume()
4959 if (dev->mib_read_interval) in ksz_resume()
4960 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_resume()
5034 ds->dev = base; in ksz_switch_alloc()
5035 ds->num_ports = DSA_MAX_PORTS; in ksz_switch_alloc()
5036 ds->ops = &ksz_switch_ops; in ksz_switch_alloc()
5042 ds->priv = swdev; in ksz_switch_alloc()
5043 swdev->dev = base; in ksz_switch_alloc()
5045 swdev->ds = ds; in ksz_switch_alloc()
5046 swdev->priv = priv; in ksz_switch_alloc()
5053 * ksz_switch_shutdown - Shutdown routine for the switch device.
5068 if (dev->dev_ops->reset && !wol_enabled) in ksz_switch_shutdown()
5069 dev->dev_ops->reset(dev); in ksz_switch_shutdown()
5071 dsa_switch_shutdown(dev->ds); in ksz_switch_shutdown()
5078 phy_interface_t phy_mode = dev->ports[port_num].interface; in ksz_parse_rgmii_delay()
5079 int rx_delay = -1, tx_delay = -1; in ksz_parse_rgmii_delay()
5084 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in ksz_parse_rgmii_delay()
5085 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in ksz_parse_rgmii_delay()
5087 if (rx_delay == -1 && tx_delay == -1) { in ksz_parse_rgmii_delay()
5088 dev_warn(dev->dev, in ksz_parse_rgmii_delay()
5089 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in ksz_parse_rgmii_delay()
5090 "please update device tree to specify \"rx-internal-delay-ps\" and " in ksz_parse_rgmii_delay()
5091 "\"tx-internal-delay-ps\"", in ksz_parse_rgmii_delay()
5108 dev->ports[port_num].rgmii_rx_val = rx_delay; in ksz_parse_rgmii_delay()
5109 dev->ports[port_num].rgmii_tx_val = tx_delay; in ksz_parse_rgmii_delay()
5113 * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding
5123 * is returned. Otherwise, -EINVAL is returned indicating an invalid value.
5135 return -EINVAL; in ksz_drive_strength_to_reg()
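
The conversion documented above is a plain table lookup over the ksz*_drive_strengths arrays. A minimal sketch, assuming a struct ksz_drive_strength with microamp and reg_val members as suggested by the mapping tables earlier in the file:

static int example_drive_strength_to_reg(const struct ksz_drive_strength *array,
					 size_t array_size, int microamp)
{
	size_t i;

	/* Return the register field that encodes exactly this current. */
	for (i = 0; i < array_size; i++)
		if (array[i].microamp == microamp)
			return array[i].reg_val;

	/* No exact match: let the caller report the supported values. */
	return -EINVAL;
}
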
5139 * ksz_drive_strength_error() - Report invalid drive strength value
5170 remaining_size -= added_len; in ksz_drive_strength_error()
5173 dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n", in ksz_drive_strength_error()
5178 * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477
5185 * based on the provided properties. It handles chip-specific nuances and
5199 if (props[KSZ_DRIVER_STRENGTH_IO].value != -1) in ksz9477_drive_strength_write()
5200 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz9477_drive_strength_write()
5203 if (dev->chip_id == KSZ8795_CHIP_ID || in ksz9477_drive_strength_write()
5204 dev->chip_id == KSZ8794_CHIP_ID || in ksz9477_drive_strength_write()
5205 dev->chip_id == KSZ8765_CHIP_ID) in ksz9477_drive_strength_write()
5211 if (props[i].value == -1) in ksz9477_drive_strength_write()
5230 * ksz88x3_drive_strength_write() - Set the drive strength configuration for
5252 if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO) in ksz88x3_drive_strength_write()
5255 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz88x3_drive_strength_write()
5273 * ksz_parse_drive_strength() - Extract and apply drive strength configurations
5288 .name = "microchip,hi-drive-strength-microamp", in ksz_parse_drive_strength()
5290 .value = -1, in ksz_parse_drive_strength()
5293 .name = "microchip,lo-drive-strength-microamp", in ksz_parse_drive_strength()
5295 .value = -1, in ksz_parse_drive_strength()
5298 .name = "microchip,io-drive-strength-microamp", in ksz_parse_drive_strength()
5300 .value = -1, in ksz_parse_drive_strength()
5303 struct device_node *np = dev->dev->of_node; in ksz_parse_drive_strength()
5310 if (ret && ret != -EINVAL) in ksz_parse_drive_strength()
5311 dev_warn(dev->dev, "Failed to read %s\n", in ksz_parse_drive_strength()
5322 switch (dev->chip_id) { in ksz_parse_drive_strength()
5342 if (of_props[i].value == -1) in ksz_parse_drive_strength()
5345 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz_parse_drive_strength()
5359 rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW); in ksz8463_configure_straps_spi()
5363 rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH); in ksz8463_configure_straps_spi()
5371 return -EINVAL; in ksz8463_configure_straps_spi()
5373 pinctrl = devm_pinctrl_get_select(dev->dev, "reset"); in ksz8463_configure_straps_spi()
5382 return pinctrl_select_default_state(dev->dev); in ksz8463_release_straps_spi()
5394 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", in ksz_switch_register()
5396 if (IS_ERR(dev->reset_gpio)) in ksz_switch_register()
5397 return PTR_ERR(dev->reset_gpio); in ksz_switch_register()
5399 if (dev->reset_gpio) { in ksz_switch_register()
5400 if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) { in ksz_switch_register()
5406 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_register()
5408 gpiod_set_value_cansleep(dev->reset_gpio, 0); in ksz_switch_register()
5411 if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) { in ksz_switch_register()
5418 mutex_init(&dev->dev_mutex); in ksz_switch_register()
5419 mutex_init(&dev->regmap_mutex); in ksz_switch_register()
5420 mutex_init(&dev->alu_mutex); in ksz_switch_register()
5421 mutex_init(&dev->vlan_mutex); in ksz_switch_register()
5427 info = ksz_lookup_info(dev->chip_id); in ksz_switch_register()
5429 return -ENODEV; in ksz_switch_register()
5432 dev->info = info; in ksz_switch_register()
5434 dev_info(dev->dev, "found switch: %s, rev %i\n", in ksz_switch_register()
5435 dev->info->dev_name, dev->chip_rev); in ksz_switch_register()
5441 dev->dev_ops = dev->info->ops; in ksz_switch_register()
5443 ret = dev->dev_ops->init(dev); in ksz_switch_register()
5447 dev->ports = devm_kzalloc(dev->dev, in ksz_switch_register()
5448 dev->info->port_cnt * sizeof(struct ksz_port), in ksz_switch_register()
5450 if (!dev->ports) in ksz_switch_register()
5451 return -ENOMEM; in ksz_switch_register()
5453 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_switch_register()
5454 spin_lock_init(&dev->ports[i].mib.stats64_lock); in ksz_switch_register()
5455 mutex_init(&dev->ports[i].mib.cnt_mutex); in ksz_switch_register()
5456 dev->ports[i].mib.counters = in ksz_switch_register()
5457 devm_kzalloc(dev->dev, in ksz_switch_register()
5458 sizeof(u64) * (dev->info->mib_cnt + 1), in ksz_switch_register()
5460 if (!dev->ports[i].mib.counters) in ksz_switch_register()
5461 return -ENOMEM; in ksz_switch_register()
5463 dev->ports[i].ksz_dev = dev; in ksz_switch_register()
5464 dev->ports[i].num = i; in ksz_switch_register()
5468 dev->ds->num_ports = dev->info->port_cnt; in ksz_switch_register()
5471 dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops; in ksz_switch_register()
5476 for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) in ksz_switch_register()
5477 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; in ksz_switch_register()
5478 if (dev->dev->of_node) { in ksz_switch_register()
5479 ret = of_get_phy_mode(dev->dev->of_node, &interface); in ksz_switch_register()
5481 dev->compat_interface = interface; in ksz_switch_register()
5482 ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); in ksz_switch_register()
5484 ports = of_get_child_by_name(dev->dev->of_node, "ports"); in ksz_switch_register()
5490 if (!(dev->port_mask & BIT(port_num))) { in ksz_switch_register()
5492 return -EINVAL; in ksz_switch_register()
5495 &dev->ports[port_num].interface); in ksz_switch_register()
5498 dev->ports[port_num].fiber = in ksz_switch_register()
5500 "micrel,fiber-mode"); in ksz_switch_register()
5504 dev->synclko_125 = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5505 "microchip,synclko-125"); in ksz_switch_register()
5506 dev->synclko_disable = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5507 "microchip,synclko-disable"); in ksz_switch_register()
5508 if (dev->synclko_125 && dev->synclko_disable) { in ksz_switch_register()
5509 dev_err(dev->dev, "inconsistent synclko settings\n"); in ksz_switch_register()
5510 return -EINVAL; in ksz_switch_register()
5513 dev->wakeup_source = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5514 "wakeup-source"); in ksz_switch_register()
5515 dev->pme_active_high = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5516 "microchip,pme-active-high"); in ksz_switch_register()
5519 ret = dsa_register_switch(dev->ds); in ksz_switch_register()
5521 dev->dev_ops->exit(dev); in ksz_switch_register()
5526 dev->mib_read_interval = msecs_to_jiffies(5000); in ksz_switch_register()
5529 schedule_delayed_work(&dev->mib_read, 0); in ksz_switch_register()
5538 if (dev->mib_read_interval) { in ksz_switch_remove()
5539 dev->mib_read_interval = 0; in ksz_switch_remove()
5540 cancel_delayed_work_sync(&dev->mib_read); in ksz_switch_remove()
5543 dev->dev_ops->exit(dev); in ksz_switch_remove()
5544 dsa_unregister_switch(dev->ds); in ksz_switch_remove()
5546 if (dev->reset_gpio) in ksz_switch_remove()
5547 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_remove()
5557 return dsa_switch_suspend(priv->ds); in ksz_switch_suspend()
5565 return dsa_switch_resume(priv->ds); in ksz_switch_resume()