Lines Matching +full:smi +full:- +full:mdio
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2017-2025 Microchip Technology Inc.
14 #include <linux/platform_data/microchip-ksz.h>
206 * struct ksz_drive_strength - drive strength mapping
215 /* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
223 * - for high speed signals
233 * - for low speed signals
250 /* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
270 * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
281 * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
291 * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
294 * the auto-negotiation results. (Note: While the datasheet describes EEE
299 * autonomously via strapping, means MAC-level software intervention is not
302 * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
306 * lack documented MAC-level LPI control.
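A minimal sketch of what such no-op LPI callbacks can look like, assuming the struct phylink_mac_ops hook signatures from recent kernels (mac_enable_tx_lpi()/mac_disable_tx_lpi()); this is an illustration, not the driver's exact code:

#include <linux/phylink.h>

/* Illustration only: the integrated PHYs negotiate and enter LPI on
 * their own, so the MAC-side hooks have nothing to program.
 */
static void example_mac_disable_tx_lpi(struct phylink_config *config)
{
	/* No MAC-level LPI control to tear down. */
}

static int example_mac_enable_tx_lpi(struct phylink_config *config,
				     u32 timer, bool tx_clk_stop)
{
	/* Accept the request; LPI entry/exit is handled by the PHY. */
	return 0;
}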
449 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_select_pcs()
450 struct ksz_port *p = &dev->ports[dp->index]; in ksz_phylink_mac_select_pcs()
452 if (ksz_is_sgmii_port(dev, dp->index) && in ksz_phylink_mac_select_pcs()
455 return p->pcs; in ksz_phylink_mac_select_pcs()
1569 * port map is NOT continuous. The per-port register
2152 if (chip->chip_id == prod_num) in ksz_lookup_info()
2164 if (dev->pdata) { in ksz_check_device_id()
2165 expected_chip_id = dev->pdata->chip_id; in ksz_check_device_id()
2168 return -ENODEV; in ksz_check_device_id()
2170 expected_chip_data = of_device_get_match_data(dev->dev); in ksz_check_device_id()
2171 expected_chip_id = expected_chip_data->chip_id; in ksz_check_device_id()
2174 if (expected_chip_id != dev->chip_id) { in ksz_check_device_id()
2175 dev_err(dev->dev, in ksz_check_device_id()
2177 expected_chip_data->dev_name, dev->info->dev_name); in ksz_check_device_id()
2178 return -ENODEV; in ksz_check_device_id()
2187 struct ksz_device *dev = ds->priv; in ksz_phylink_get_caps()
2189 if (dev->info->supports_mii[port]) in ksz_phylink_get_caps()
2190 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); in ksz_phylink_get_caps()
2192 if (dev->info->supports_rmii[port]) in ksz_phylink_get_caps()
2194 config->supported_interfaces); in ksz_phylink_get_caps()
2196 if (dev->info->supports_rgmii[port]) in ksz_phylink_get_caps()
2197 phy_interface_set_rgmii(config->supported_interfaces); in ksz_phylink_get_caps()
2199 if (dev->info->internal_phy[port]) { in ksz_phylink_get_caps()
2201 config->supported_interfaces); in ksz_phylink_get_caps()
2203 * phy-mode property is absent in ksz_phylink_get_caps()
2206 config->supported_interfaces); in ksz_phylink_get_caps()
2209 if (dev->dev_ops->get_caps) in ksz_phylink_get_caps()
2210 dev->dev_ops->get_caps(dev, port, config); in ksz_phylink_get_caps()
2212 if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) { in ksz_phylink_get_caps()
2213 memcpy(config->lpi_interfaces, config->supported_interfaces, in ksz_phylink_get_caps()
2214 sizeof(config->lpi_interfaces)); in ksz_phylink_get_caps()
2216 config->lpi_capabilities = MAC_100FD; in ksz_phylink_get_caps()
2217 if (dev->info->gbit_capable[port]) in ksz_phylink_get_caps()
2218 config->lpi_capabilities |= MAC_1000FD; in ksz_phylink_get_caps()
2221 config->eee_enabled_default = true; in ksz_phylink_get_caps()
2233 mib = &dev->ports[port].mib; in ksz_r_mib_stats64()
2234 stats = &mib->stats64; in ksz_r_mib_stats64()
2235 pstats = &mib->pause_stats; in ksz_r_mib_stats64()
2236 raw = (struct ksz_stats_raw *)mib->counters; in ksz_r_mib_stats64()
2238 spin_lock(&mib->stats64_lock); in ksz_r_mib_stats64()
2240 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz_r_mib_stats64()
2241 raw->rx_pause; in ksz_r_mib_stats64()
2242 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz_r_mib_stats64()
2243 raw->tx_pause; in ksz_r_mib_stats64()
2248 stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2249 stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; in ksz_r_mib_stats64()
2251 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz_r_mib_stats64()
2252 raw->rx_oversize; in ksz_r_mib_stats64()
2254 stats->rx_crc_errors = raw->rx_crc_err; in ksz_r_mib_stats64()
2255 stats->rx_frame_errors = raw->rx_align_err; in ksz_r_mib_stats64()
2256 stats->rx_dropped = raw->rx_discards; in ksz_r_mib_stats64()
2257 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz_r_mib_stats64()
2258 stats->rx_frame_errors + stats->rx_dropped; in ksz_r_mib_stats64()
2260 stats->tx_window_errors = raw->tx_late_col; in ksz_r_mib_stats64()
2261 stats->tx_fifo_errors = raw->tx_discards; in ksz_r_mib_stats64()
2262 stats->tx_aborted_errors = raw->tx_exc_col; in ksz_r_mib_stats64()
2263 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz_r_mib_stats64()
2264 stats->tx_aborted_errors; in ksz_r_mib_stats64()
2266 stats->multicast = raw->rx_mcast; in ksz_r_mib_stats64()
2267 stats->collisions = raw->tx_total_col; in ksz_r_mib_stats64()
2269 pstats->tx_pause_frames = raw->tx_pause; in ksz_r_mib_stats64()
2270 pstats->rx_pause_frames = raw->rx_pause; in ksz_r_mib_stats64()
2272 spin_unlock(&mib->stats64_lock); in ksz_r_mib_stats64()
2274 if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) { in ksz_r_mib_stats64()
2275 ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col); in ksz_r_mib_stats64()
2277 dev_err(dev->dev, "Failed to monitor transmission halt\n"); in ksz_r_mib_stats64()
2288 mib = &dev->ports[port].mib; in ksz88xx_r_mib_stats64()
2289 stats = &mib->stats64; in ksz88xx_r_mib_stats64()
2290 pstats = &mib->pause_stats; in ksz88xx_r_mib_stats64()
2291 raw = (struct ksz88xx_stats_raw *)mib->counters; in ksz88xx_r_mib_stats64()
2293 spin_lock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
2295 stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast + in ksz88xx_r_mib_stats64()
2296 raw->rx_pause; in ksz88xx_r_mib_stats64()
2297 stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast + in ksz88xx_r_mib_stats64()
2298 raw->tx_pause; in ksz88xx_r_mib_stats64()
2303 stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2304 stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN; in ksz88xx_r_mib_stats64()
2306 stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + in ksz88xx_r_mib_stats64()
2307 raw->rx_oversize; in ksz88xx_r_mib_stats64()
2309 stats->rx_crc_errors = raw->rx_crc_err; in ksz88xx_r_mib_stats64()
2310 stats->rx_frame_errors = raw->rx_align_err; in ksz88xx_r_mib_stats64()
2311 stats->rx_dropped = raw->rx_discards; in ksz88xx_r_mib_stats64()
2312 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + in ksz88xx_r_mib_stats64()
2313 stats->rx_frame_errors + stats->rx_dropped; in ksz88xx_r_mib_stats64()
2315 stats->tx_window_errors = raw->tx_late_col; in ksz88xx_r_mib_stats64()
2316 stats->tx_fifo_errors = raw->tx_discards; in ksz88xx_r_mib_stats64()
2317 stats->tx_aborted_errors = raw->tx_exc_col; in ksz88xx_r_mib_stats64()
2318 stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + in ksz88xx_r_mib_stats64()
2319 stats->tx_aborted_errors; in ksz88xx_r_mib_stats64()
2321 stats->multicast = raw->rx_mcast; in ksz88xx_r_mib_stats64()
2322 stats->collisions = raw->tx_total_col; in ksz88xx_r_mib_stats64()
2324 pstats->tx_pause_frames = raw->tx_pause; in ksz88xx_r_mib_stats64()
2325 pstats->rx_pause_frames = raw->rx_pause; in ksz88xx_r_mib_stats64()
2327 spin_unlock(&mib->stats64_lock); in ksz88xx_r_mib_stats64()
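In both stats64 helpers above, the byte counters are reduced by ETH_FCS_LEN per packet because the hardware byte totals count frames as seen on the wire, including the 4-byte frame check sequence; for illustration, 1,000 frames totalling 1,000,000 hardware-counted bytes are reported as 1,000,000 - 1,000 * 4 = 996,000 bytes.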
2333 struct ksz_device *dev = ds->priv; in ksz_get_stats64()
2336 mib = &dev->ports[port].mib; in ksz_get_stats64()
2338 spin_lock(&mib->stats64_lock); in ksz_get_stats64()
2339 memcpy(s, &mib->stats64, sizeof(*s)); in ksz_get_stats64()
2340 spin_unlock(&mib->stats64_lock); in ksz_get_stats64()
2346 struct ksz_device *dev = ds->priv; in ksz_get_pause_stats()
2349 mib = &dev->ports[port].mib; in ksz_get_pause_stats()
2351 spin_lock(&mib->stats64_lock); in ksz_get_pause_stats()
2352 memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats)); in ksz_get_pause_stats()
2353 spin_unlock(&mib->stats64_lock); in ksz_get_pause_stats()
2359 struct ksz_device *dev = ds->priv; in ksz_get_strings()
2365 for (i = 0; i < dev->info->mib_cnt; i++) in ksz_get_strings()
2366 ethtool_puts(&buf, dev->info->mib_names[i].string); in ksz_get_strings()
2370 * ksz_update_port_member - Adjust port forwarding rules based on STP state and
2394 struct ksz_port *p = &dev->ports[port]; in ksz_update_port_member()
2395 struct dsa_switch *ds = dev->ds; in ksz_update_port_member()
2406 for (i = 0; i < ds->num_ports; i++) { in ksz_update_port_member()
2408 struct ksz_port *other_p = &dev->ports[i]; in ksz_update_port_member()
2417 if (other_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2426 if (p->stp_state == BR_STATE_FORWARDING && in ksz_update_port_member()
2427 !(p->isolated && other_p->isolated)) { in ksz_update_port_member()
2433 for (j = 0; j < ds->num_ports; j++) { in ksz_update_port_member()
2443 third_p = &dev->ports[j]; in ksz_update_port_member()
2444 if (third_p->stp_state != BR_STATE_FORWARDING) in ksz_update_port_member()
2458 !(other_p->isolated && third_p->isolated)) in ksz_update_port_member()
2462 dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); in ksz_update_port_member()
2468 if (!port_member && p->stp_state == BR_STATE_FORWARDING && in ksz_update_port_member()
2469 (dev->hsr_ports & BIT(port))) in ksz_update_port_member()
2470 port_member = dev->hsr_ports; in ksz_update_port_member()
2471 dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); in ksz_update_port_member()
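The pairing rule applied when building the per-port membership bitmaps above can be condensed into a small hypothetical helper (not part of the driver), assuming only the stp_state and isolated fields visible in the fragments:

#include <linux/if_bridge.h>

/* Hypothetical condensation of the rule: two user ports may forward
 * to each other only if both are in BR_STATE_FORWARDING and they are
 * not both isolated.
 */
static bool example_ports_may_forward(const struct ksz_port *a,
				      const struct ksz_port *b)
{
	if (a->stp_state != BR_STATE_FORWARDING ||
	    b->stp_state != BR_STATE_FORWARDING)
		return false;

	return !(a->isolated && b->isolated);
}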
2476 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_read()
2480 ret = dev->dev_ops->r_phy(dev, addr, regnum, &val); in ksz_sw_mdio_read()
2490 struct ksz_device *dev = bus->priv; in ksz_sw_mdio_write()
2492 return dev->dev_ops->w_phy(dev, addr, regnum, val); in ksz_sw_mdio_write()
2496 * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
2497 * @bus: MDIO bus structure.
2498 * @addr: PHY address on the parent MDIO bus.
2501 * This function provides a direct read operation on the parent MDIO bus for
2502 * accessing PHY registers. By bypassing SPI or I2C, it uses the parent MDIO bus
2510 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_read()
2512 return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum); in ksz_parent_mdio_read()
2516 * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
2517 * @bus: MDIO bus structure.
2518 * @addr: PHY address on the parent MDIO bus.
2522 * This function provides a direct write operation on the parent MDIO bus for
2523 * accessing PHY registers. Bypassing SPI or I2C, it uses the parent MDIO bus
2531 struct ksz_device *dev = bus->priv; in ksz_parent_mdio_write()
2533 return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val); in ksz_parent_mdio_write()
2537 * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
2547 * Return: Port index on success, or -EINVAL if no matching port is found.
2551 struct dsa_switch *ds = dev->ds; in ksz_phy_addr_to_port()
2555 if (dev->info->internal_phy[dp->index] && in ksz_phy_addr_to_port()
2556 dev->phy_addr_map[dp->index] == addr) in ksz_phy_addr_to_port()
2557 return dp->index; in ksz_phy_addr_to_port()
2560 return -EINVAL; in ksz_phy_addr_to_port()
2564 * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
2575 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_setup()
2581 if (BIT(phy) & ds->phys_mii_mask) { in ksz_irq_phy_setup()
2588 irq = irq_find_mapping(dev->ports[port].pirq.domain, in ksz_irq_phy_setup()
2591 ret = -EINVAL; in ksz_irq_phy_setup()
2594 ds->user_mii_bus->irq[phy] = irq; in ksz_irq_phy_setup()
2599 while (phy--) in ksz_irq_phy_setup()
2600 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_setup()
2601 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_setup()
2607 * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
2615 struct dsa_switch *ds = dev->ds; in ksz_irq_phy_free()
2619 if (BIT(phy) & ds->phys_mii_mask) in ksz_irq_phy_free()
2620 irq_dispose_mapping(ds->user_mii_bus->irq[phy]); in ksz_irq_phy_free()
2624 * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
2627 * @mdio_np: pointer to the MDIO node in the device tree
2631 * `phy-handle` properties are correctly set and that the internal PHYs match
2646 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_parse_dt_phy_config()
2647 if (!dev->info->internal_phy[dp->index]) in ksz_parse_dt_phy_config()
2650 phy_node = of_parse_phandle(dp->dn, "phy-handle", 0); in ksz_parse_dt_phy_config()
2652 dev_err(dev->dev, "failed to parse phy-handle for port %d.\n", in ksz_parse_dt_phy_config()
2653 dp->index); in ksz_parse_dt_phy_config()
2660 dev_err(dev->dev, "failed to get PHY-parent node for port %d\n", in ksz_parse_dt_phy_config()
2661 dp->index); in ksz_parse_dt_phy_config()
2664 dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n", in ksz_parse_dt_phy_config()
2665 dp->index, mdio_np, phy_parent_node); in ksz_parse_dt_phy_config()
2670 dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n", in ksz_parse_dt_phy_config()
2671 dp->index, ret); in ksz_parse_dt_phy_config()
2673 } else if (phy_addr != dev->phy_addr_map[dp->index]) { in ksz_parse_dt_phy_config()
2674 dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n", in ksz_parse_dt_phy_config()
2675 dp->index, dev->phy_addr_map[dp->index], in ksz_parse_dt_phy_config()
2679 bus->phy_mask |= BIT(phy_addr); in ksz_parse_dt_phy_config()
2688 return -EINVAL; in ksz_parse_dt_phy_config()
2694 * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
2697 * This function sets up and registers an MDIO bus for the KSZ switch device,
2698 * allowing access to its internal PHYs. If the device supports side MDIO,
2699 * the function will configure the external MDIO controller specified by the
2700 * "mdio-parent-bus" device tree property to directly manage internal PHYs.
2709 struct dsa_switch *ds = dev->ds; in ksz_mdio_register()
2714 mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio"); in ksz_mdio_register()
2718 parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0); in ksz_mdio_register()
2719 if (parent_bus_node && !dev->info->phy_side_mdio_supported) { in ksz_mdio_register()
2720 …dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property… in ksz_mdio_register()
2721 ret = -EINVAL; in ksz_mdio_register()
2727 ret = -EPROBE_DEFER; in ksz_mdio_register()
2732 dev->parent_mdio_bus = parent_bus; in ksz_mdio_register()
2735 bus = devm_mdiobus_alloc(ds->dev); in ksz_mdio_register()
2737 ret = -ENOMEM; in ksz_mdio_register()
2741 if (dev->dev_ops->mdio_bus_preinit) { in ksz_mdio_register()
2742 ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus); in ksz_mdio_register()
2747 if (dev->dev_ops->create_phy_addr_map) { in ksz_mdio_register()
2748 ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus); in ksz_mdio_register()
2752 for (i = 0; i < dev->info->port_cnt; i++) in ksz_mdio_register()
2753 dev->phy_addr_map[i] = i; in ksz_mdio_register()
2756 bus->priv = dev; in ksz_mdio_register()
2758 bus->read = ksz_parent_mdio_read; in ksz_mdio_register()
2759 bus->write = ksz_parent_mdio_write; in ksz_mdio_register()
2760 bus->name = "KSZ side MDIO"; in ksz_mdio_register()
2761 snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d", in ksz_mdio_register()
2762 ds->index); in ksz_mdio_register()
2764 bus->read = ksz_sw_mdio_read; in ksz_mdio_register()
2765 bus->write = ksz_sw_mdio_write; in ksz_mdio_register()
2766 bus->name = "ksz user smi"; in ksz_mdio_register()
2767 if (ds->dst->index != 0) { in ksz_mdio_register()
2768 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index); in ksz_mdio_register()
2770 snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); in ksz_mdio_register()
2778 ds->phys_mii_mask = bus->phy_mask; in ksz_mdio_register()
2779 bus->parent = ds->dev; in ksz_mdio_register()
2781 ds->user_mii_bus = bus; in ksz_mdio_register()
2783 if (dev->irq > 0) { in ksz_mdio_register()
2789 ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); in ksz_mdio_register()
2791 dev_err(ds->dev, "unable to register MDIO bus %s\n", in ksz_mdio_register()
2792 bus->id); in ksz_mdio_register()
2793 if (dev->irq > 0) in ksz_mdio_register()
2808 kirq->masked |= BIT(d->hwirq); in ksz_irq_mask()
2815 kirq->masked &= ~BIT(d->hwirq); in ksz_irq_unmask()
2822 mutex_lock(&kirq->dev->lock_irq); in ksz_irq_bus_lock()
2828 struct ksz_device *dev = kirq->dev; in ksz_irq_bus_sync_unlock()
2831 ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); in ksz_irq_bus_sync_unlock()
2833 dev_err(dev->dev, "failed to change IRQ mask\n"); in ksz_irq_bus_sync_unlock()
2835 mutex_unlock(&dev->lock_irq); in ksz_irq_bus_sync_unlock()
2839 .name = "ksz-irq",
2849 irq_set_chip_data(irq, d->host_data); in ksz_irq_domain_map()
2865 free_irq(kirq->irq_num, kirq); in ksz_irq_free()
2867 for (irq = 0; irq < kirq->nirqs; irq++) { in ksz_irq_free()
2868 virq = irq_find_mapping(kirq->domain, irq); in ksz_irq_free()
2872 irq_domain_remove(kirq->domain); in ksz_irq_free()
2885 dev = kirq->dev; in ksz_irq_thread_fn()
2888 ret = ksz_read8(dev, kirq->reg_status, &data); in ksz_irq_thread_fn()
2892 for (n = 0; n < kirq->nirqs; ++n) { in ksz_irq_thread_fn()
2894 sub_irq = irq_find_mapping(kirq->domain, n); in ksz_irq_thread_fn()
2907 kirq->dev = dev; in ksz_irq_common_setup()
2908 kirq->masked = ~0; in ksz_irq_common_setup()
2910 kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0, in ksz_irq_common_setup()
2912 if (!kirq->domain) in ksz_irq_common_setup()
2913 return -ENOMEM; in ksz_irq_common_setup()
2915 for (n = 0; n < kirq->nirqs; n++) in ksz_irq_common_setup()
2916 irq_create_mapping(kirq->domain, n); in ksz_irq_common_setup()
2918 ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn, in ksz_irq_common_setup()
2919 IRQF_ONESHOT, kirq->name, kirq); in ksz_irq_common_setup()
2933 struct ksz_irq *girq = &dev->girq; in ksz_girq_setup()
2935 girq->nirqs = dev->info->port_cnt; in ksz_girq_setup()
2936 girq->reg_mask = REG_SW_PORT_INT_MASK__1; in ksz_girq_setup()
2937 girq->reg_status = REG_SW_PORT_INT_STATUS__1; in ksz_girq_setup()
2938 snprintf(girq->name, sizeof(girq->name), "global_port_irq"); in ksz_girq_setup()
2940 girq->irq_num = dev->irq; in ksz_girq_setup()
2947 struct ksz_irq *pirq = &dev->ports[p].pirq; in ksz_pirq_setup()
2949 pirq->nirqs = dev->info->port_nirqs; in ksz_pirq_setup()
2950 pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK); in ksz_pirq_setup()
2951 pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS); in ksz_pirq_setup()
2952 snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p); in ksz_pirq_setup()
2954 pirq->irq_num = irq_find_mapping(dev->girq.domain, p); in ksz_pirq_setup()
2955 if (!pirq->irq_num) in ksz_pirq_setup()
2956 return -EINVAL; in ksz_pirq_setup()
2965 struct ksz_device *dev = ds->priv; in ksz_setup()
2972 regs = dev->info->regs; in ksz_setup()
2974 dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), in ksz_setup()
2975 dev->info->num_vlans, GFP_KERNEL); in ksz_setup()
2976 if (!dev->vlan_cache) in ksz_setup()
2977 return -ENOMEM; in ksz_setup()
2979 ret = dev->dev_ops->reset(dev); in ksz_setup()
2981 dev_err(ds->dev, "failed to reset switch\n"); in ksz_setup()
2989 if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) { in ksz_setup()
2990 ret = dev->dev_ops->pcs_create(dev); in ksz_setup()
3005 dev->dev_ops->config_cpu_port(ds); in ksz_setup()
3007 dev->dev_ops->enable_stp_addr(dev); in ksz_setup()
3009 ds->num_tx_queues = dev->info->num_tx_queues; in ksz_setup()
3016 ds->configure_vlan_while_not_filtering = false; in ksz_setup()
3017 ds->dscp_prio_mapping_is_global = true; in ksz_setup()
3019 if (dev->dev_ops->setup) { in ksz_setup()
3020 ret = dev->dev_ops->setup(ds); in ksz_setup()
3030 p = &dev->ports[dev->cpu_port]; in ksz_setup()
3031 p->learning = true; in ksz_setup()
3033 if (dev->irq > 0) { in ksz_setup()
3038 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_setup()
3039 ret = ksz_pirq_setup(dev, dp->index); in ksz_setup()
3043 if (dev->info->ptp_capable) { in ksz_setup()
3044 ret = ksz_ptp_irq_setup(ds, dp->index); in ksz_setup()
3051 if (dev->info->ptp_capable) { in ksz_setup()
3054 dev_err(dev->dev, "Failed to register PTP clock: %d\n", in ksz_setup()
3062 dev_err(dev->dev, "failed to register the mdio"); in ksz_setup()
3077 if (dev->info->ptp_capable) in ksz_setup()
3080 if (dev->irq > 0) { in ksz_setup()
3081 dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) { in ksz_setup()
3082 if (dev->info->ptp_capable) in ksz_setup()
3083 ksz_ptp_irq_free(ds, dp->index); in ksz_setup()
3085 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_setup()
3087 ksz_irq_free(&dev->girq); in ksz_setup()
3095 struct ksz_device *dev = ds->priv; in ksz_teardown()
3098 if (dev->info->ptp_capable) in ksz_teardown()
3101 if (dev->irq > 0) { in ksz_teardown()
3102 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_teardown()
3103 if (dev->info->ptp_capable) in ksz_teardown()
3104 ksz_ptp_irq_free(ds, dp->index); in ksz_teardown()
3106 ksz_irq_free(&dev->ports[dp->index].pirq); in ksz_teardown()
3109 ksz_irq_free(&dev->girq); in ksz_teardown()
3112 if (dev->dev_ops->teardown) in ksz_teardown()
3113 dev->dev_ops->teardown(ds); in ksz_teardown()
3118 struct ksz_port_mib *mib = &dev->ports[port].mib; in port_r_cnt()
3122 while (mib->cnt_ptr < dev->info->reg_mib_cnt) { in port_r_cnt()
3123 dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, in port_r_cnt()
3124 &mib->counters[mib->cnt_ptr]); in port_r_cnt()
3125 ++mib->cnt_ptr; in port_r_cnt()
3129 dropped = &mib->counters[dev->info->mib_cnt]; in port_r_cnt()
3132 while (mib->cnt_ptr < dev->info->mib_cnt) { in port_r_cnt()
3133 dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, in port_r_cnt()
3134 dropped, &mib->counters[mib->cnt_ptr]); in port_r_cnt()
3135 ++mib->cnt_ptr; in port_r_cnt()
3137 mib->cnt_ptr = 0; in port_r_cnt()
3148 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_mib_read_work()
3149 if (dsa_is_unused_port(dev->ds, i)) in ksz_mib_read_work()
3152 p = &dev->ports[i]; in ksz_mib_read_work()
3153 mib = &p->mib; in ksz_mib_read_work()
3154 mutex_lock(&mib->cnt_mutex); in ksz_mib_read_work()
3159 if (!p->read) { in ksz_mib_read_work()
3160 const struct dsa_port *dp = dsa_to_port(dev->ds, i); in ksz_mib_read_work()
3162 if (!netif_carrier_ok(dp->user)) in ksz_mib_read_work()
3163 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_mib_read_work()
3166 p->read = false; in ksz_mib_read_work()
3168 if (dev->dev_ops->r_mib_stat64) in ksz_mib_read_work()
3169 dev->dev_ops->r_mib_stat64(dev, i); in ksz_mib_read_work()
3171 mutex_unlock(&mib->cnt_mutex); in ksz_mib_read_work()
3174 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_mib_read_work()
3181 INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); in ksz_init_mib_timer()
3183 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_init_mib_timer()
3184 struct ksz_port_mib *mib = &dev->ports[i].mib; in ksz_init_mib_timer()
3186 dev->dev_ops->port_init_cnt(dev, i); in ksz_init_mib_timer()
3188 mib->cnt_ptr = 0; in ksz_init_mib_timer()
3189 memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); in ksz_init_mib_timer()
3195 struct ksz_device *dev = ds->priv; in ksz_phy_read16()
3199 ret = dev->dev_ops->r_phy(dev, addr, reg, &val); in ksz_phy_read16()
3208 struct ksz_device *dev = ds->priv; in ksz_phy_write16()
3211 ret = dev->dev_ops->w_phy(dev, addr, reg, val); in ksz_phy_write16()
3220 struct ksz_device *dev = ds->priv; in ksz_get_phy_flags()
3222 switch (dev->chip_id) { in ksz_get_phy_flags()
3225 * Port 1 does not work with LinkMD Cable-Testing. in ksz_get_phy_flags()
3241 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_link_down()
3244 dev->ports[dp->index].read = true; in ksz_phylink_mac_link_down()
3246 if (dev->mib_read_interval) in ksz_phylink_mac_link_down()
3247 schedule_delayed_work(&dev->mib_read, 0); in ksz_phylink_mac_link_down()
3252 struct ksz_device *dev = ds->priv; in ksz_sset_count()
3257 return dev->info->mib_cnt; in ksz_sset_count()
3264 struct ksz_device *dev = ds->priv; in ksz_get_ethtool_stats()
3267 mib = &dev->ports[port].mib; in ksz_get_ethtool_stats()
3268 mutex_lock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3271 if (!netif_carrier_ok(dp->user)) in ksz_get_ethtool_stats()
3272 mib->cnt_ptr = dev->info->reg_mib_cnt; in ksz_get_ethtool_stats()
3274 memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); in ksz_get_ethtool_stats()
3275 mutex_unlock(&mib->cnt_mutex); in ksz_get_ethtool_stats()
3300 struct ksz_device *dev = ds->priv; in ksz_port_fast_age()
3302 dev->dev_ops->flush_dyn_mac_table(dev, port); in ksz_port_fast_age()
3307 struct ksz_device *dev = ds->priv; in ksz_set_ageing_time()
3309 if (!dev->dev_ops->set_ageing_time) in ksz_set_ageing_time()
3310 return -EOPNOTSUPP; in ksz_set_ageing_time()
3312 return dev->dev_ops->set_ageing_time(dev, msecs); in ksz_set_ageing_time()
3319 struct ksz_device *dev = ds->priv; in ksz_port_fdb_add()
3321 if (!dev->dev_ops->fdb_add) in ksz_port_fdb_add()
3322 return -EOPNOTSUPP; in ksz_port_fdb_add()
3324 return dev->dev_ops->fdb_add(dev, port, addr, vid, db); in ksz_port_fdb_add()
3331 struct ksz_device *dev = ds->priv; in ksz_port_fdb_del()
3333 if (!dev->dev_ops->fdb_del) in ksz_port_fdb_del()
3334 return -EOPNOTSUPP; in ksz_port_fdb_del()
3336 return dev->dev_ops->fdb_del(dev, port, addr, vid, db); in ksz_port_fdb_del()
3342 struct ksz_device *dev = ds->priv; in ksz_port_fdb_dump()
3344 if (!dev->dev_ops->fdb_dump) in ksz_port_fdb_dump()
3345 return -EOPNOTSUPP; in ksz_port_fdb_dump()
3347 return dev->dev_ops->fdb_dump(dev, port, cb, data); in ksz_port_fdb_dump()
3354 struct ksz_device *dev = ds->priv; in ksz_port_mdb_add()
3356 if (!dev->dev_ops->mdb_add) in ksz_port_mdb_add()
3357 return -EOPNOTSUPP; in ksz_port_mdb_add()
3359 return dev->dev_ops->mdb_add(dev, port, mdb, db); in ksz_port_mdb_add()
3366 struct ksz_device *dev = ds->priv; in ksz_port_mdb_del()
3368 if (!dev->dev_ops->mdb_del) in ksz_port_mdb_del()
3369 return -EOPNOTSUPP; in ksz_port_mdb_del()
3371 return dev->dev_ops->mdb_del(dev, port, mdb, db); in ksz_port_mdb_del()
3380 for (ipm = 0; ipm < dev->info->num_ipms; ipm++) { in ksz9477_set_default_prio_queue_mapping()
3387 queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues); in ksz9477_set_default_prio_queue_mapping()
3399 struct ksz_device *dev = ds->priv; in ksz_port_setup()
3406 dev->dev_ops->port_setup(dev, port, false); in ksz_port_setup()
3423 struct ksz_device *dev = ds->priv; in ksz_port_stp_state_set()
3428 regs = dev->info->regs; in ksz_port_stp_state_set()
3433 p = &dev->ports[port]; in ksz_port_stp_state_set()
3444 if (!p->learning) in ksz_port_stp_state_set()
3449 if (!p->learning) in ksz_port_stp_state_set()
3456 dev_err(ds->dev, "invalid STP state: %d\n", state); in ksz_port_stp_state_set()
3462 p->stp_state = state; in ksz_port_stp_state_set()
3469 struct ksz_device *dev = ds->priv; in ksz_port_teardown()
3471 switch (dev->chip_id) { in ksz_port_teardown()
3491 return -EINVAL; in ksz_port_pre_bridge_flags()
3500 struct ksz_device *dev = ds->priv; in ksz_port_bridge_flags()
3501 struct ksz_port *p = &dev->ports[port]; in ksz_port_bridge_flags()
3505 p->learning = !!(flags.val & BR_LEARNING); in ksz_port_bridge_flags()
3508 p->isolated = !!(flags.val & BR_ISOLATED); in ksz_port_bridge_flags()
3511 ksz_port_stp_state_set(ds, port, p->stp_state); in ksz_port_bridge_flags()
3521 struct ksz_device *dev = ds->priv; in ksz_get_tag_protocol()
3527 if (dev->chip_id == KSZ88X3_CHIP_ID || in ksz_get_tag_protocol()
3528 dev->chip_id == KSZ8463_CHIP_ID || in ksz_get_tag_protocol()
3529 dev->chip_id == KSZ8563_CHIP_ID || in ksz_get_tag_protocol()
3530 dev->chip_id == KSZ9893_CHIP_ID || in ksz_get_tag_protocol()
3531 dev->chip_id == KSZ9563_CHIP_ID) in ksz_get_tag_protocol()
3534 if (dev->chip_id == KSZ8567_CHIP_ID || in ksz_get_tag_protocol()
3535 dev->chip_id == KSZ9477_CHIP_ID || in ksz_get_tag_protocol()
3536 dev->chip_id == KSZ9896_CHIP_ID || in ksz_get_tag_protocol()
3537 dev->chip_id == KSZ9897_CHIP_ID || in ksz_get_tag_protocol()
3538 dev->chip_id == KSZ9567_CHIP_ID || in ksz_get_tag_protocol()
3539 dev->chip_id == LAN9646_CHIP_ID) in ksz_get_tag_protocol()
3560 tagger_data->xmit_work_fn = ksz_port_deferred_xmit; in ksz_connect_tag_protocol()
3563 return -EPROTONOSUPPORT; in ksz_connect_tag_protocol()
3570 struct ksz_device *dev = ds->priv; in ksz_port_vlan_filtering()
3572 if (!dev->dev_ops->vlan_filtering) in ksz_port_vlan_filtering()
3573 return -EOPNOTSUPP; in ksz_port_vlan_filtering()
3575 return dev->dev_ops->vlan_filtering(dev, port, flag, extack); in ksz_port_vlan_filtering()
3582 struct ksz_device *dev = ds->priv; in ksz_port_vlan_add()
3584 if (!dev->dev_ops->vlan_add) in ksz_port_vlan_add()
3585 return -EOPNOTSUPP; in ksz_port_vlan_add()
3587 return dev->dev_ops->vlan_add(dev, port, vlan, extack); in ksz_port_vlan_add()
3593 struct ksz_device *dev = ds->priv; in ksz_port_vlan_del()
3595 if (!dev->dev_ops->vlan_del) in ksz_port_vlan_del()
3596 return -EOPNOTSUPP; in ksz_port_vlan_del()
3598 return dev->dev_ops->vlan_del(dev, port, vlan); in ksz_port_vlan_del()
3605 struct ksz_device *dev = ds->priv; in ksz_port_mirror_add()
3607 if (!dev->dev_ops->mirror_add) in ksz_port_mirror_add()
3608 return -EOPNOTSUPP; in ksz_port_mirror_add()
3610 return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack); in ksz_port_mirror_add()
3616 struct ksz_device *dev = ds->priv; in ksz_port_mirror_del()
3618 if (dev->dev_ops->mirror_del) in ksz_port_mirror_del()
3619 dev->dev_ops->mirror_del(dev, port, mirror); in ksz_port_mirror_del()
3624 struct ksz_device *dev = ds->priv; in ksz_change_mtu()
3626 if (!dev->dev_ops->change_mtu) in ksz_change_mtu()
3627 return -EOPNOTSUPP; in ksz_change_mtu()
3629 return dev->dev_ops->change_mtu(dev, port, mtu); in ksz_change_mtu()
3634 struct ksz_device *dev = ds->priv; in ksz_max_mtu()
3636 switch (dev->chip_id) { in ksz_max_mtu()
3640 return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3645 return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3660 return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; in ksz_max_mtu()
3663 return -EOPNOTSUPP; in ksz_max_mtu()
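The MTU figures above follow from subtracting the VLAN-tagged Ethernet header (VLAN_ETH_HLEN, 18 bytes) and the FCS (ETH_FCS_LEN, 4 bytes) from the chip's maximum frame size; assuming, purely for illustration, a 2000-byte hardware frame limit, the resulting MTU would be 2000 - 18 - 4 = 1978 bytes.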
3667 * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
3682 struct ksz_device *dev = ds->priv; in ksz_support_eee()
3684 if (!dev->info->internal_phy[port]) in ksz_support_eee()
3687 switch (dev->chip_id) { in ksz_support_eee()
3711 * controls. If not disabled, the PHY ports can auto-negotiate in ksz_support_eee()
3726 struct ksz_device *dev = ds->priv; in ksz_set_mac_eee()
3728 if (!e->tx_lpi_enabled) { in ksz_set_mac_eee()
3729 dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n"); in ksz_set_mac_eee()
3730 return -EINVAL; in ksz_set_mac_eee()
3733 if (e->tx_lpi_timer) { in ksz_set_mac_eee()
3734 dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n"); in ksz_set_mac_eee()
3735 return -EINVAL; in ksz_set_mac_eee()
3744 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_xmii()
3745 struct ksz_port *p = &dev->ports[port]; in ksz_set_xmii()
3746 const u16 *regs = dev->info->regs; in ksz_set_xmii()
3769 /* On KSZ9893, disable RGMII in-band status support */ in ksz_set_xmii()
3770 if (dev->chip_id == KSZ9893_CHIP_ID || in ksz_set_xmii()
3771 dev->chip_id == KSZ8563_CHIP_ID || in ksz_set_xmii()
3772 dev->chip_id == KSZ9563_CHIP_ID || in ksz_set_xmii()
3777 dev_err(dev->dev, "Unsupported interface '%s' for port %d\n", in ksz_set_xmii()
3782 if (p->rgmii_tx_val) in ksz_set_xmii()
3785 if (p->rgmii_rx_val) in ksz_set_xmii()
3794 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_xmii()
3795 const u16 *regs = dev->info->regs; in ksz_get_xmii()
3830 struct ksz_device *dev = dp->ds->priv; in ksz88x3_phylink_mac_config()
3832 dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN); in ksz88x3_phylink_mac_config()
3840 struct ksz_device *dev = dp->ds->priv; in ksz_phylink_mac_config()
3841 int port = dp->index; in ksz_phylink_mac_config()
3844 if (dev->info->internal_phy[port]) in ksz_phylink_mac_config()
3852 dev_err(dev->dev, "In-band AN not supported!\n"); in ksz_phylink_mac_config()
3856 ksz_set_xmii(dev, port, state->interface); in ksz_phylink_mac_config()
3858 if (dev->dev_ops->setup_rgmii_delay) in ksz_phylink_mac_config()
3859 dev->dev_ops->setup_rgmii_delay(dev, port); in ksz_phylink_mac_config()
3864 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_get_gbit()
3865 const u16 *regs = dev->info->regs; in ksz_get_gbit()
3882 const u8 *bitval = dev->info->xmii_ctrl1; in ksz_set_gbit()
3883 const u16 *regs = dev->info->regs; in ksz_set_gbit()
3901 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_set_100_10mbit()
3902 const u16 *regs = dev->info->regs; in ksz_set_100_10mbit()
3932 const u8 *bitval = dev->info->xmii_ctrl0; in ksz_duplex_flowctrl()
3933 const u32 *masks = dev->info->masks; in ksz_duplex_flowctrl()
3934 const u16 *regs = dev->info->regs; in ksz_duplex_flowctrl()
3963 struct ksz_device *dev = dp->ds->priv; in ksz9477_phylink_mac_link_up()
3964 int port = dp->index; in ksz9477_phylink_mac_link_up()
3967 p = &dev->ports[port]; in ksz9477_phylink_mac_link_up()
3970 if (dev->info->internal_phy[port]) in ksz9477_phylink_mac_link_up()
3973 p->phydev.speed = speed; in ksz9477_phylink_mac_link_up()
3997 dev->chip_id = KSZ8463_CHIP_ID; in ksz_switch_detect()
4003 dev->chip_id = KSZ8795_CHIP_ID; in ksz_switch_detect()
4007 dev->chip_id = KSZ8765_CHIP_ID; in ksz_switch_detect()
4009 dev->chip_id = KSZ8794_CHIP_ID; in ksz_switch_detect()
4011 return -ENODEV; in ksz_switch_detect()
4016 dev->chip_id = KSZ88X3_CHIP_ID; in ksz_switch_detect()
4018 return -ENODEV; in ksz_switch_detect()
4023 dev->chip_id = KSZ8895_CHIP_ID; in ksz_switch_detect()
4025 return -ENODEV; in ksz_switch_detect()
4030 dev->chip_id = KSZ8864_CHIP_ID; in ksz_switch_detect()
4037 dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32); in ksz_switch_detect()
4053 if (dev->chip_id != LAN9646_CHIP_ID) in ksz_switch_detect()
4054 dev->chip_id = id32; in ksz_switch_detect()
4063 dev->chip_id = KSZ8563_CHIP_ID; in ksz_switch_detect()
4065 dev->chip_id = KSZ9563_CHIP_ID; in ksz_switch_detect()
4067 dev->chip_id = KSZ9893_CHIP_ID; in ksz_switch_detect()
4071 dev_err(dev->dev, in ksz_switch_detect()
4073 return -ENODEV; in ksz_switch_detect()
4082 struct ksz_device *dev = ds->priv; in ksz_cls_flower_add()
4084 switch (dev->chip_id) { in ksz_cls_flower_add()
4097 return -EOPNOTSUPP; in ksz_cls_flower_add()
4103 struct ksz_device *dev = ds->priv; in ksz_cls_flower_del()
4105 switch (dev->chip_id) { in ksz_cls_flower_del()
4118 return -EOPNOTSUPP; in ksz_cls_flower_del()
4122 * is converted to Hex-decimal using the successive multiplication method. On
4133 txrate = idle_slope - send_slope; in cinc_cal()
4136 return -EINVAL; in cinc_cal()
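The "successive multiplication" conversion mentioned in the comment above can be illustrated with a generic helper (not the driver's exact routine): multiply the fractional remainder by 16 on every step, take the integer part as the next hex digit, and carry the remainder forward.

#include <linux/types.h>

/* Generic illustration of successive multiplication: convert the
 * fraction num/den (with num < den) into 'digits' base-16 fractional
 * digits.
 */
static u32 example_frac_to_hex(u32 num, u32 den, int digits)
{
	u32 result = 0;

	while (digits--) {
		num *= 16;				/* shift one hex place left */
		result = (result << 4) | (num / den);	/* take the integer part */
		num %= den;				/* carry the remainder forward */
	}

	return result;
}

/* e.g. example_frac_to_hex(1, 3, 4) == 0x5555, i.e. 1/3 ~= 0x0.5555 */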
4167 struct ksz_device *dev = ds->priv; in ksz_setup_tc_cbs()
4171 if (!dev->info->tc_cbs_supported) in ksz_setup_tc_cbs()
4172 return -EOPNOTSUPP; in ksz_setup_tc_cbs()
4174 if (qopt->queue > dev->info->num_tx_queues) in ksz_setup_tc_cbs()
4175 return -EINVAL; in ksz_setup_tc_cbs()
4178 ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue); in ksz_setup_tc_cbs()
4182 if (!qopt->enable) in ksz_setup_tc_cbs()
4188 qopt->hicredit); in ksz_setup_tc_cbs()
4194 qopt->locredit); in ksz_setup_tc_cbs()
4199 ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw); in ksz_setup_tc_cbs()
4203 if (dev->dev_ops->tc_cbs_set_cinc) { in ksz_setup_tc_cbs()
4204 ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw); in ksz_setup_tc_cbs()
4220 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_disable_egress_rate_limit()
4237 return p->bands - 1 - band; in ksz_ets_band_to_queue()
4245 reg += ((3 - queue) / 2) * 2; in ksz8463_tc_ctrl()
4247 reg -= (queue & 1); in ksz8463_tc_ctrl()
4252 * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
4260 * - No configurable queue-to-priority mapping
4261 * - No weight adjustment in WFQ mode
4278 for (band = 0; band < p->bands; band++) { in ksz88x3_tc_ets_add()
4299 * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
4316 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz88x3_tc_ets_del()
4384 for (band = 0; band < p->bands; band++) { in ksz_tc_ets_add()
4396 for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) { in ksz_tc_ets_add()
4399 if (tc_prio >= dev->info->num_ipms) in ksz_tc_ets_add()
4402 queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]); in ksz_tc_ets_add()
4416 for (queue = 0; queue < dev->info->num_tx_queues; queue++) { in ksz_tc_ets_del()
4424 /* Revert the queue mapping for TC-priority to its default setting on in ksz_tc_ets_del()
4438 if (p->bands != dev->info->num_tx_queues) { in ksz_tc_ets_validate()
4439 dev_err(dev->dev, "Not supported amount of bands. It should be %d\n", in ksz_tc_ets_validate()
4440 dev->info->num_tx_queues); in ksz_tc_ets_validate()
4441 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4444 for (band = 0; band < p->bands; ++band) { in ksz_tc_ets_validate()
4457 if (p->quanta[band]) { in ksz_tc_ets_validate()
4458 dev_err(dev->dev, "Quanta/weights configuration is not supported.\n"); in ksz_tc_ets_validate()
4459 return -EOPNOTSUPP; in ksz_tc_ets_validate()
4469 struct ksz_device *dev = ds->priv; in ksz_tc_setup_qdisc_ets()
4473 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4475 if (qopt->parent != TC_H_ROOT) { in ksz_tc_setup_qdisc_ets()
4476 dev_err(dev->dev, "Parent should be \"root\"\n"); in ksz_tc_setup_qdisc_ets()
4477 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4480 switch (qopt->command) { in ksz_tc_setup_qdisc_ets()
4482 ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4488 &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4490 return ksz_tc_ets_add(dev, port, &qopt->replace_params); in ksz_tc_setup_qdisc_ets()
4498 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4501 return -EOPNOTSUPP; in ksz_tc_setup_qdisc_ets()
4513 return -EOPNOTSUPP; in ksz_setup_tc()
4518 * ksz_handle_wake_reason - Handle wake reason on a specified port.
4532 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_handle_wake_reason()
4533 const u16 *regs = dev->info->regs; in ksz_handle_wake_reason()
4537 ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4545 dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, in ksz_handle_wake_reason()
4550 return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS], in ksz_handle_wake_reason()
4555 * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
4558 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4567 struct ksz_device *dev = ds->priv; in ksz_get_wol()
4568 const u16 *regs = dev->info->regs; in ksz_get_wol()
4575 if (!dev->wakeup_source) in ksz_get_wol()
4578 wol->supported = WAKE_PHY; in ksz_get_wol()
4584 if (ksz_is_port_mac_global_usable(dev->ds, port)) in ksz_get_wol()
4585 wol->supported |= WAKE_MAGIC; in ksz_get_wol()
4587 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_get_wol()
4593 wol->wolopts |= WAKE_MAGIC; in ksz_get_wol()
4595 wol->wolopts |= WAKE_PHY; in ksz_get_wol()
4599 * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
4602 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
4604 * This function configures Wake-on-LAN (WoL) settings for a specified
4616 struct ksz_device *dev = ds->priv; in ksz_set_wol()
4617 const u16 *regs = dev->info->regs; in ksz_set_wol()
4622 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) in ksz_set_wol()
4623 return -EINVAL; in ksz_set_wol()
4626 return -EOPNOTSUPP; in ksz_set_wol()
4628 if (!dev->wakeup_source) in ksz_set_wol()
4629 return -EOPNOTSUPP; in ksz_set_wol()
4635 if (wol->wolopts & WAKE_MAGIC) in ksz_set_wol()
4637 if (wol->wolopts & WAKE_PHY) in ksz_set_wol()
4640 ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4657 ret = ksz_switch_macaddr_get(dev->ds, port, NULL); in ksz_set_wol()
4661 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4664 ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], in ksz_set_wol()
4668 ksz_switch_macaddr_put(dev->ds); in ksz_set_wol()
4676 * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
4677 * considering Wake-on-LAN (WoL) settings.
4683 * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
4689 const struct ksz_dev_ops *ops = dev->dev_ops; in ksz_wol_pre_shutdown()
4690 const u16 *regs = dev->info->regs; in ksz_wol_pre_shutdown()
4700 if (!dev->wakeup_source) in ksz_wol_pre_shutdown()
4703 dsa_switch_for_each_user_port(dp, dev->ds) { in ksz_wol_pre_shutdown()
4706 ret = ops->pme_pread8(dev, dp->index, in ksz_wol_pre_shutdown()
4714 ksz_handle_wake_reason(dev, dp->index); in ksz_wol_pre_shutdown()
4719 if (dev->pme_active_high) in ksz_wol_pre_shutdown()
4721 ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en); in ksz_wol_pre_shutdown()
4733 if (dp->hsr_dev) { in ksz_port_set_mac_address()
4734 dev_err(ds->dev, in ksz_port_set_mac_address()
4737 return -EBUSY; in ksz_port_set_mac_address()
4745 ksz_get_wol(ds, dp->index, &wol); in ksz_port_set_mac_address()
4747 dev_err(ds->dev, in ksz_port_set_mac_address()
4750 return -EBUSY; in ksz_port_set_mac_address()
4757 * ksz_is_port_mac_global_usable - Check if the MAC address on a given port
4770 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_is_port_mac_global_usable()
4771 const unsigned char *addr = user->dev_addr; in ksz_is_port_mac_global_usable()
4773 struct ksz_device *dev = ds->priv; in ksz_is_port_mac_global_usable()
4777 switch_macaddr = dev->switch_macaddr; in ksz_is_port_mac_global_usable()
4778 if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr)) in ksz_is_port_mac_global_usable()
4785 * ksz_switch_macaddr_get - Program the switch's MAC address register.
4792 * multiple features like HSR self-address filtering and WoL. Other user ports
4802 struct net_device *user = dsa_to_port(ds, port)->user; in ksz_switch_macaddr_get()
4803 const unsigned char *addr = user->dev_addr; in ksz_switch_macaddr_get()
4805 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_get()
4806 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_get()
4812 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_get()
4814 if (!ether_addr_equal(switch_macaddr->addr, addr)) { in ksz_switch_macaddr_get()
4817 switch_macaddr->addr); in ksz_switch_macaddr_get()
4818 return -EBUSY; in ksz_switch_macaddr_get()
4821 refcount_inc(&switch_macaddr->refcount); in ksz_switch_macaddr_get()
4827 return -ENOMEM; in ksz_switch_macaddr_get()
4829 ether_addr_copy(switch_macaddr->addr, addr); in ksz_switch_macaddr_get()
4830 refcount_set(&switch_macaddr->refcount, 1); in ksz_switch_macaddr_get()
4831 dev->switch_macaddr = switch_macaddr; in ksz_switch_macaddr_get()
4852 dev->switch_macaddr = NULL; in ksz_switch_macaddr_get()
4853 refcount_set(&switch_macaddr->refcount, 0); in ksz_switch_macaddr_get()
4862 struct ksz_device *dev = ds->priv; in ksz_switch_macaddr_put()
4863 const u16 *regs = dev->info->regs; in ksz_switch_macaddr_put()
4869 switch_macaddr = dev->switch_macaddr; in ksz_switch_macaddr_put()
4870 if (!refcount_dec_and_test(&switch_macaddr->refcount)) in ksz_switch_macaddr_put()
4876 dev->switch_macaddr = NULL; in ksz_switch_macaddr_put()
4883 struct ksz_device *dev = ds->priv; in ksz_hsr_join()
4891 if (dev->chip_id != KSZ9477_CHIP_ID) { in ksz_hsr_join()
4893 return -EOPNOTSUPP; in ksz_hsr_join()
4897 if (dev->hsr_dev && hsr != dev->hsr_dev) { in ksz_hsr_join()
4899 return -EOPNOTSUPP; in ksz_hsr_join()
4905 return -EOPNOTSUPP; in ksz_hsr_join()
4909 if (hweight8(dev->hsr_ports) >= 2) { in ksz_hsr_join()
4911 "Cannot offload more than two ports - using software HSR"); in ksz_hsr_join()
4912 return -EOPNOTSUPP; in ksz_hsr_join()
4923 dev->hsr_dev = hsr; in ksz_hsr_join()
4924 dev->hsr_ports |= BIT(port); in ksz_hsr_join()
4932 struct ksz_device *dev = ds->priv; in ksz_hsr_leave()
4934 WARN_ON(dev->chip_id != KSZ9477_CHIP_ID); in ksz_hsr_leave()
4937 dev->hsr_ports &= ~BIT(port); in ksz_hsr_leave()
4938 if (!dev->hsr_ports) in ksz_hsr_leave()
4939 dev->hsr_dev = NULL; in ksz_hsr_leave()
4948 struct ksz_device *dev = ds->priv; in ksz_suspend()
4950 cancel_delayed_work_sync(&dev->mib_read); in ksz_suspend()
4956 struct ksz_device *dev = ds->priv; in ksz_resume()
4958 if (dev->mib_read_interval) in ksz_resume()
4959 schedule_delayed_work(&dev->mib_read, dev->mib_read_interval); in ksz_resume()
5033 ds->dev = base; in ksz_switch_alloc()
5034 ds->num_ports = DSA_MAX_PORTS; in ksz_switch_alloc()
5035 ds->ops = &ksz_switch_ops; in ksz_switch_alloc()
5041 ds->priv = swdev; in ksz_switch_alloc()
5042 swdev->dev = base; in ksz_switch_alloc()
5044 swdev->ds = ds; in ksz_switch_alloc()
5045 swdev->priv = priv; in ksz_switch_alloc()
5052 * ksz_switch_shutdown - Shutdown routine for the switch device.
5067 if (dev->dev_ops->reset && !wol_enabled) in ksz_switch_shutdown()
5068 dev->dev_ops->reset(dev); in ksz_switch_shutdown()
5070 dsa_switch_shutdown(dev->ds); in ksz_switch_shutdown()
5077 phy_interface_t phy_mode = dev->ports[port_num].interface; in ksz_parse_rgmii_delay()
5078 int rx_delay = -1, tx_delay = -1; in ksz_parse_rgmii_delay()
5083 of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); in ksz_parse_rgmii_delay()
5084 of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); in ksz_parse_rgmii_delay()
5086 if (rx_delay == -1 && tx_delay == -1) { in ksz_parse_rgmii_delay()
5087 dev_warn(dev->dev, in ksz_parse_rgmii_delay()
5088 "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " in ksz_parse_rgmii_delay()
5089 "please update device tree to specify \"rx-internal-delay-ps\" and " in ksz_parse_rgmii_delay()
5090 "\"tx-internal-delay-ps\"", in ksz_parse_rgmii_delay()
5107 dev->ports[port_num].rgmii_rx_val = rx_delay; in ksz_parse_rgmii_delay()
5108 dev->ports[port_num].rgmii_tx_val = tx_delay; in ksz_parse_rgmii_delay()
5112 * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding
5122 * is returned. Otherwise, -EINVAL is returned indicating an invalid value.
5134 return -EINVAL; in ksz_drive_strength_to_reg()
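A sketch of the table-lookup idea behind ksz_drive_strength_to_reg(), using a hypothetical entry layout (the real struct ksz_drive_strength fields may differ):

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical table entry: a supported drive strength in microamps
 * and the register encoding that selects it.
 */
struct example_drive_strength {
	int microamp;
	u8 reg_val;
};

static int example_drive_strength_to_reg(const struct example_drive_strength *tbl,
					 int count, int microamp)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].microamp == microamp)
			return tbl[i].reg_val;

	return -EINVAL;	/* unsupported value, reported as described below */
}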
5138 * ksz_drive_strength_error() - Report invalid drive strength value
5169 remaining_size -= added_len; in ksz_drive_strength_error()
5172 dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n", in ksz_drive_strength_error()
5177 * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477
5184 * based on the provided properties. It handles chip-specific nuances and
5198 if (props[KSZ_DRIVER_STRENGTH_IO].value != -1) in ksz9477_drive_strength_write()
5199 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz9477_drive_strength_write()
5202 if (dev->chip_id == KSZ8795_CHIP_ID || in ksz9477_drive_strength_write()
5203 dev->chip_id == KSZ8794_CHIP_ID || in ksz9477_drive_strength_write()
5204 dev->chip_id == KSZ8765_CHIP_ID) in ksz9477_drive_strength_write()
5210 if (props[i].value == -1) in ksz9477_drive_strength_write()
5229 * ksz88x3_drive_strength_write() - Set the drive strength configuration for
5251 if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO) in ksz88x3_drive_strength_write()
5254 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz88x3_drive_strength_write()
5272 * ksz_parse_drive_strength() - Extract and apply drive strength configurations
5287 .name = "microchip,hi-drive-strength-microamp", in ksz_parse_drive_strength()
5289 .value = -1, in ksz_parse_drive_strength()
5292 .name = "microchip,lo-drive-strength-microamp", in ksz_parse_drive_strength()
5294 .value = -1, in ksz_parse_drive_strength()
5297 .name = "microchip,io-drive-strength-microamp", in ksz_parse_drive_strength()
5299 .value = -1, in ksz_parse_drive_strength()
5302 struct device_node *np = dev->dev->of_node; in ksz_parse_drive_strength()
5309 if (ret && ret != -EINVAL) in ksz_parse_drive_strength()
5310 dev_warn(dev->dev, "Failed to read %s\n", in ksz_parse_drive_strength()
5321 switch (dev->chip_id) { in ksz_parse_drive_strength()
5341 if (of_props[i].value == -1) in ksz_parse_drive_strength()
5344 dev_warn(dev->dev, "%s is not supported by this chip variant\n", in ksz_parse_drive_strength()
5358 rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW); in ksz8463_configure_straps_spi()
5362 rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH); in ksz8463_configure_straps_spi()
5370 return -EINVAL; in ksz8463_configure_straps_spi()
5372 pinctrl = devm_pinctrl_get_select(dev->dev, "reset"); in ksz8463_configure_straps_spi()
5381 return pinctrl_select_default_state(dev->dev); in ksz8463_release_straps_spi()
5393 dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", in ksz_switch_register()
5395 if (IS_ERR(dev->reset_gpio)) in ksz_switch_register()
5396 return PTR_ERR(dev->reset_gpio); in ksz_switch_register()
5398 if (dev->reset_gpio) { in ksz_switch_register()
5399 if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) { in ksz_switch_register()
5405 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_register()
5407 gpiod_set_value_cansleep(dev->reset_gpio, 0); in ksz_switch_register()
5410 if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) { in ksz_switch_register()
5417 mutex_init(&dev->dev_mutex); in ksz_switch_register()
5418 mutex_init(&dev->regmap_mutex); in ksz_switch_register()
5419 mutex_init(&dev->alu_mutex); in ksz_switch_register()
5420 mutex_init(&dev->vlan_mutex); in ksz_switch_register()
5426 info = ksz_lookup_info(dev->chip_id); in ksz_switch_register()
5428 return -ENODEV; in ksz_switch_register()
5431 dev->info = info; in ksz_switch_register()
5433 dev_info(dev->dev, "found switch: %s, rev %i\n", in ksz_switch_register()
5434 dev->info->dev_name, dev->chip_rev); in ksz_switch_register()
5440 dev->dev_ops = dev->info->ops; in ksz_switch_register()
5442 ret = dev->dev_ops->init(dev); in ksz_switch_register()
5446 dev->ports = devm_kzalloc(dev->dev, in ksz_switch_register()
5447 dev->info->port_cnt * sizeof(struct ksz_port), in ksz_switch_register()
5449 if (!dev->ports) in ksz_switch_register()
5450 return -ENOMEM; in ksz_switch_register()
5452 for (i = 0; i < dev->info->port_cnt; i++) { in ksz_switch_register()
5453 spin_lock_init(&dev->ports[i].mib.stats64_lock); in ksz_switch_register()
5454 mutex_init(&dev->ports[i].mib.cnt_mutex); in ksz_switch_register()
5455 dev->ports[i].mib.counters = in ksz_switch_register()
5456 devm_kzalloc(dev->dev, in ksz_switch_register()
5457 sizeof(u64) * (dev->info->mib_cnt + 1), in ksz_switch_register()
5459 if (!dev->ports[i].mib.counters) in ksz_switch_register()
5460 return -ENOMEM; in ksz_switch_register()
5462 dev->ports[i].ksz_dev = dev; in ksz_switch_register()
5463 dev->ports[i].num = i; in ksz_switch_register()
5467 dev->ds->num_ports = dev->info->port_cnt; in ksz_switch_register()
5470 dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops; in ksz_switch_register()
5475 for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) in ksz_switch_register()
5476 dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; in ksz_switch_register()
5477 if (dev->dev->of_node) { in ksz_switch_register()
5478 ret = of_get_phy_mode(dev->dev->of_node, &interface); in ksz_switch_register()
5480 dev->compat_interface = interface; in ksz_switch_register()
5481 ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); in ksz_switch_register()
5483 ports = of_get_child_by_name(dev->dev->of_node, "ports"); in ksz_switch_register()
5489 if (!(dev->port_mask & BIT(port_num))) { in ksz_switch_register()
5491 return -EINVAL; in ksz_switch_register()
5494 &dev->ports[port_num].interface); in ksz_switch_register()
5497 dev->ports[port_num].fiber = in ksz_switch_register()
5499 "micrel,fiber-mode"); in ksz_switch_register()
5503 dev->synclko_125 = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5504 "microchip,synclko-125"); in ksz_switch_register()
5505 dev->synclko_disable = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5506 "microchip,synclko-disable"); in ksz_switch_register()
5507 if (dev->synclko_125 && dev->synclko_disable) { in ksz_switch_register()
5508 dev_err(dev->dev, "inconsistent synclko settings\n"); in ksz_switch_register()
5509 return -EINVAL; in ksz_switch_register()
5512 dev->wakeup_source = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5513 "wakeup-source"); in ksz_switch_register()
5514 dev->pme_active_high = of_property_read_bool(dev->dev->of_node, in ksz_switch_register()
5515 "microchip,pme-active-high"); in ksz_switch_register()
5518 ret = dsa_register_switch(dev->ds); in ksz_switch_register()
5520 dev->dev_ops->exit(dev); in ksz_switch_register()
5525 dev->mib_read_interval = msecs_to_jiffies(5000); in ksz_switch_register()
5528 schedule_delayed_work(&dev->mib_read, 0); in ksz_switch_register()
5537 if (dev->mib_read_interval) { in ksz_switch_remove()
5538 dev->mib_read_interval = 0; in ksz_switch_remove()
5539 cancel_delayed_work_sync(&dev->mib_read); in ksz_switch_remove()
5542 dev->dev_ops->exit(dev); in ksz_switch_remove()
5543 dsa_unregister_switch(dev->ds); in ksz_switch_remove()
5545 if (dev->reset_gpio) in ksz_switch_remove()
5546 gpiod_set_value_cansleep(dev->reset_gpio, 1); in ksz_switch_remove()
5556 return dsa_switch_suspend(priv->ds); in ksz_switch_suspend()
5564 return dsa_switch_resume(priv->ds); in ksz_switch_resume()