Lines matching "extts" and "fifo" in nxp-c45-tja11xx.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2021-2025 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
20 #include "nxp-c45-tja11xx.h"
197 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
292 bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
302 return phydev->drv->driver_data;
310 return phy_data->regmap;
319 if (reg_field->size == 0) {
321 return -EINVAL;
324 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
328 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
329 GENMASK(reg_field->offset + reg_field->size - 1,
330 reg_field->offset);
332 ret >>= reg_field->offset;
344 if (reg_field->size == 0) {
346 return -EINVAL;
349 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
350 GENMASK(reg_field->offset + reg_field->size - 1,
351 reg_field->offset);
352 set = val << reg_field->offset;
354 return phy_modify_mmd_changed(phydev, reg_field->devad,
355 reg_field->reg, mask, set);
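/*
 * Editor's note (worked example, field layout hypothetical): a 3-bit field
 * at offset 4 gives mask = GENMASK(6, 4) = 0x70; writing val = 5 yields
 * set = 5 << 4 = 0x50, so phy_modify_mmd_changed() touches bits 6:4 only.
 * A size == 1 field degenerates to BIT(offset), which is what the
 * set/clear helpers checked just below rely on.
 */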
361 if (reg_field->size != 1) {
363 return -EINVAL;
372 if (reg_field->size != 1) {
374 return -EINVAL;
382 return phydev->irq <= 0;
390 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
392 nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
393 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
394 regmap->vend1_ltc_rd_nsec_0);
395 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
396 regmap->vend1_ltc_rd_nsec_1) << 16;
397 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
398 regmap->vend1_ltc_rd_sec_0);
399 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
400 regmap->vend1_ltc_rd_sec_1) << 16;
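/*
 * Editor's note: the ltc_read field latches a coherent snapshot of the
 * local time counter; tv_nsec and tv_sec are then each assembled from two
 * 16-bit MMD reads (lo | hi << 16). Latching first keeps the four reads
 * consistent with one another.
 */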
411 mutex_lock(&priv->ptp_lock);
413 mutex_unlock(&priv->ptp_lock);
422 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
424 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
425 ts->tv_nsec);
426 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
427 ts->tv_nsec >> 16);
428 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
429 ts->tv_sec);
430 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
431 ts->tv_sec >> 16);
432 nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);
442 mutex_lock(&priv->ptp_lock);
444 mutex_unlock(&priv->ptp_lock);
452 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
453 const struct nxp_c45_regmap *regmap = data->regmap;
458 mutex_lock(&priv->ptp_lock);
462 subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);
464 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
465 regmap->vend1_rate_adj_subns_0,
472 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
473 regmap->vend1_rate_adj_subns_1,
475 mutex_unlock(&priv->ptp_lock);
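/*
 * Editor's sketch of the rate-adjust math (PPM_TO_SUBNS_INC() itself is
 * outside this excerpt): frequency is trimmed by biasing the
 * sub-nanosecond amount the LTC adds every clock period. Assuming a
 * hypothetical 8 ns period, +1000 ppb works out to 8 * 1000 / 1e9 = 8e-6
 * ns of extra increment per tick, carried in fixed point across the two
 * _subns_ registers written above.
 */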
485 mutex_lock(&priv->ptp_lock);
490 mutex_unlock(&priv->ptp_lock);
498 ts->tv_nsec = hwts->nsec;
499 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
500 ts->tv_sec -= TS_SEC_MASK + 1;
501 ts->tv_sec &= ~TS_SEC_MASK;
502 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
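/*
 * Editor's worked example (assuming TS_SEC_MASK covers the low two bits of
 * the seconds count, as the sec_1_0 field further down suggests): with the
 * LTC at tv_sec = 100 (low bits 0) and a hardware stamp carrying low bits
 * 3, the stamp predates the wrap, so one mask period (TS_SEC_MASK + 1 = 4)
 * is subtracted first: 100 - 4 = 96, then (96 & ~3) | 3 = 99.
 */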
509 return ntohs(header->sequence_id) == hwts->sequence_id &&
510 ptp_get_msgtype(header, type) == hwts->msg_type &&
511 header->domain_number == hwts->domain_number;
515 struct timespec64 *extts)
517 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
519 extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
520 regmap->vend1_ext_trg_data_0);
521 extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
522 regmap->vend1_ext_trg_data_1) << 16;
523 extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
524 regmap->vend1_ext_trg_data_2);
525 extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
526 regmap->vend1_ext_trg_data_3) << 16;
527 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
528 regmap->vend1_ext_trg_ctrl, RING_DONE);
546 struct timespec64 *extts)
548 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
549 struct phy_device *phydev = priv->phydev;
555 regmap->vend1_ext_trg_ctrl);
564 * timestamp from the FIFO to the buffer.
567 regmap->vend1_ext_trg_ctrl, RING_DONE);
573 nxp_c45_get_extts(priv, extts);
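/*
 * Editor's note: on the FIFO-based variant the driver first checks
 * vend1_ext_trg_ctrl, writes RING_DONE to advance the next entry from the
 * FIFO into the data registers, and only then reuses nxp_c45_get_extts()
 * to read it out; the non-FIFO path above reads the data registers
 * directly and acks with RING_DONE afterwards.
 */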
581 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
582 struct phy_device *phydev = priv->phydev;
584 hwts->domain_number =
585 nxp_c45_read_reg_field(phydev, &regmap->domain_number);
586 hwts->msg_type =
587 nxp_c45_read_reg_field(phydev, &regmap->msg_type);
588 hwts->sequence_id =
589 nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
590 hwts->nsec =
591 nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
592 hwts->nsec |=
593 nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
594 hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
595 hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
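/*
 * Editor's note: nsec is rebuilt as nsec_15_0 | (nsec_29_16 << 16), 30
 * bits being enough for 0..999999999, while only 5 bits of seconds
 * (sec_1_0 plus sec_4_2 << 2) are captured; nxp_c45_reconstruct_ts()
 * above recovers the full seconds value.
 */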
604 mutex_lock(&priv->ptp_lock);
605 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
607 reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
614 mutex_unlock(&priv->ptp_lock);
632 struct phy_device *phydev = priv->phydev;
637 mutex_lock(&priv->ptp_lock);
646 * new timestamp from the FIFO to the buffer.
658 mutex_unlock(&priv->ptp_lock);
672 spin_lock_irqsave(&priv->tx_queue.lock, flags);
673 skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
674 ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
675 NXP_C45_SKB_CB(skb)->type);
679 __skb_unlink(skb, &priv->tx_queue);
682 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
685 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
692 phydev_warn(priv->phydev,
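/*
 * Editor's note: TX skbs sit in tx_queue until an egress stamp with a
 * matching sequence id, message type and domain shows up; the match is
 * unlinked under tx_queue.lock and completed with the reconstructed time,
 * while a stamp that matches no queued skb only rates a phydev_warn().
 */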
700 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
701 bool poll_txts = nxp_c45_poll_txts(priv->phydev);
711 while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
712 ts_valid = data->get_egressts(priv, &hwts);
722 while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
723 nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
724 ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
729 shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
730 NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
734 if (priv->extts) {
735 ts_valid = data->get_extts(priv, &ts);
736 if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
737 priv->extts_ts = ts;
738 event.index = priv->extts_index;
741 ptp_clock_event(priv->ptp_clock, &event);
746 return reschedule ? 1 : -1;
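/*
 * Editor's note: the PTP core interprets do_aux_work()'s return value as
 * the delay in jiffies before the next poll, negative meaning "do not
 * reschedule"; hence reschedule ? 1 : -1. While extts is enabled the
 * worker keeps rearming itself, since trigger events are polled rather
 * than interrupt driven here.
 */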
752 struct phy_device *phydev = priv->phydev;
761 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
762 struct phy_device *phydev = priv->phydev;
765 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
770 nxp_c45_clear_reg_field(priv->phydev,
771 &regmap->pps_enable);
772 nxp_c45_clear_reg_field(priv->phydev,
773 &regmap->pps_polarity);
784 if (perout->period.sec != 1 || perout->period.nsec != 0) {
786 return -EINVAL;
789 if (!(perout->flags & PTP_PEROUT_PHASE)) {
790 if (perout->start.sec != 0 || perout->start.nsec != 0) {
792 return -EINVAL;
795 if (perout->phase.nsec != 0 &&
796 perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
798 return -EINVAL;
801 if (perout->phase.nsec == 0)
802 nxp_c45_clear_reg_field(priv->phydev,
803 &regmap->pps_polarity);
805 nxp_c45_set_reg_field(priv->phydev,
806 &regmap->pps_polarity);
811 nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);
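/*
 * Editor's sketch of a userspace request this path accepts, using the
 * standard PTP character-device ABI from <linux/ptp_clock.h> (values per
 * the checks above; the fd and pin index are hypothetical):
 */
struct ptp_perout_request req = {
	.index  = 0,
	.period = { .sec = 1, .nsec = 0 },	/* only 1 Hz is accepted */
	.flags  = PTP_PEROUT_PHASE,
	/* phase must be 0 or half a period; half selects inverted polarity */
	.phase  = { .sec = 0, .nsec = 500000000 },	/* NSEC_PER_SEC / 2 */
};
/* ioctl(ptp_fd, PTP_PEROUT_REQUEST2, &req); without PTP_PEROUT_PHASE,
 * start.sec and start.nsec must both be zero. */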
817 struct ptp_extts_request *extts)
819 if (extts->flags & PTP_RISING_EDGE)
823 if (extts->flags & PTP_FALLING_EDGE)
829 struct ptp_extts_request *extts)
834 if (extts->flags & PTP_RISING_EDGE ||
835 extts->flags == PTP_ENABLE_FEATURE)
844 if (extts->flags & PTP_FALLING_EDGE)
855 struct ptp_extts_request *extts, int on)
857 const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
861 if ((extts->flags & PTP_RISING_EDGE) &&
862 (extts->flags & PTP_FALLING_EDGE) &&
863 !data->ext_ts_both_edges)
864 return -EOPNOTSUPP;
866 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
872 priv->extts = false;
877 if (data->ext_ts_both_edges)
878 nxp_c45_set_rising_and_falling(priv->phydev, extts);
880 nxp_c45_set_rising_or_falling(priv->phydev, extts);
883 priv->extts = true;
884 priv->extts_index = extts->index;
885 ptp_schedule_worker(priv->ptp_clock, 0);
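/*
 * Editor's note: a request for both edges is refused up front unless the
 * silicon advertises ext_ts_both_edges; otherwise a single edge is
 * programmed. Enabling extts also kicks the aux worker immediately, as
 * trigger events on this driver are delivered by polling.
 */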
895 switch (req->type) {
897 return nxp_c45_extts_enable(priv, &req->extts, on);
899 return nxp_c45_perout_enable(priv, &req->perout, on);
901 return -EOPNOTSUPP;
924 return -EINVAL;
932 return -EOPNOTSUPP;
940 priv->caps = (struct ptp_clock_info) {
961 priv->ptp_clock = ptp_clock_register(&priv->caps,
962 &priv->phydev->mdio.dev);
964 if (IS_ERR(priv->ptp_clock))
965 return PTR_ERR(priv->ptp_clock);
967 if (!priv->ptp_clock)
968 return -ENOMEM;
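/*
 * Editor's note: ptp_clock_register() returns an ERR_PTR() on failure and
 * NULL when the PTP clock subsystem is compiled out, so the two outcomes
 * are checked separately here.
 */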
979 switch (priv->hwts_tx) {
981 NXP_C45_SKB_CB(skb)->type = type;
982 NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
983 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
984 skb_queue_tail(&priv->tx_queue, skb);
985 if (nxp_c45_poll_txts(priv->phydev))
986 ptp_schedule_worker(priv->ptp_clock, 0);
1005 if (!priv->hwts_rx)
1008 NXP_C45_SKB_CB(skb)->header = header;
1009 skb_queue_tail(&priv->rx_queue, skb);
1010 ptp_schedule_worker(priv->ptp_clock, 0);
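/*
 * Editor's note: both directions defer to the PTP kworker. TX skbs are
 * flagged SKBTX_IN_PROGRESS and queued until their egress stamp appears
 * (the worker is only kicked here when the PHY lacks a usable IRQ), and
 * RX skbs are queued so the worker can expand the partial stamp the PHY
 * left in header->reserved2 into a full ktime.
 */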
1021 struct phy_device *phydev = priv->phydev;
1024 if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
1025 return -ERANGE;
1028 priv->hwts_tx = cfg->tx_type;
1030 switch (cfg->rx_filter) {
1032 priv->hwts_rx = 0;
1037 priv->hwts_rx = 1;
1038 cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1041 return -ERANGE;
1044 if (priv->hwts_rx || priv->hwts_tx) {
1046 data->regmap->vend1_event_msg_filt,
1048 data->ptp_enable(phydev, true);
1051 data->regmap->vend1_event_msg_filt,
1053 data->ptp_enable(phydev, false);
1056 if (nxp_c45_poll_txts(priv->phydev))
1059 if (priv->hwts_tx)
1060 nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1062 nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
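/*
 * Editor's note: when either direction is enabled, event-message filtering
 * and the PTP block are switched on together; the egress-timestamp
 * interrupt is armed only for TX stamping on PHYs with a real IRQ line,
 * the polled path covering the rest.
 */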
1074 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1077 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1078 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1079 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1136 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1151 idx = i - ARRAY_SIZE(common_hw_stats);
1152 ethtool_puts(&data, phy_data->stats[idx].name);
1170 idx = i - ARRAY_SIZE(common_hw_stats);
1171 reg_field = &phy_data->stats[idx].counter;
1209 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1245 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1262 struct nxp_c45_phy *priv = phydev->priv;
1275 irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
1277 /* If ack_ptp_irq is false, the IRQ bit is self-clearing and will
1278 * be cleared when the EGR TS FIFO is empty. Otherwise, the
1281 if (data->ack_ptp_irq)
1284 while (data->get_egressts(priv, &hwts))
1290 data->nmi_handler(phydev, &ret);
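/*
 * Editor's note: the handler drains every pending egress stamp in one
 * pass via the get_egressts() loop, pairing each with its queued TX skb,
 * so a single interrupt can retire several timestamps before the optional
 * vendor NMI handler runs.
 */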
1319 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1330 ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1338 &regmap->cable_test_result);
1358 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1372 return -EINVAL;
1384 if (phydev->state == PHY_NOLINK) {
1401 return -EINVAL;
1406 return -EINVAL;
1419 data->counters_enable(phydev);
1427 data->regmap->vend1_ptp_clk_period,
1428 data->ptp_clk_period);
1429 nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);
1431 data->ptp_init(phydev);
1441 phase_offset_raw -= 738;
1453 struct nxp_c45_phy *priv = phydev->priv;
1454 u64 tx_delay = priv->tx_delay;
1455 u64 rx_delay = priv->rx_delay;
1458 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1459 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1468 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1469 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1481 struct nxp_c45_phy *priv = phydev->priv;
1484 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1485 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1486 ret = device_property_read_u32(&phydev->mdio.dev,
1487 "tx-internal-delay-ps",
1488 &priv->tx_delay);
1490 priv->tx_delay = DEFAULT_ID_PS;
1492 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1495 "tx-internal-delay-ps invalid value\n");
1500 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1501 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1502 ret = device_property_read_u32(&phydev->mdio.dev,
1503 "rx-internal-delay-ps",
1504 &priv->rx_delay);
1506 priv->rx_delay = DEFAULT_ID_PS;
1508 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1511 "rx-internal-delay-ps invalid value\n");
1521 struct nxp_c45_phy *priv = phydev->priv;
1528 switch (phydev->interface) {
1532 return -EINVAL;
1542 phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1543 return -EINVAL;
1556 return -EINVAL;
1563 phydev_err(phydev, "rev-mii mode not supported\n");
1564 return -EINVAL;
1572 return -EINVAL;
1578 if (priv->flags & TJA11XX_REVERSE_MODE)
1587 return -EINVAL;
1595 return -EINVAL;
1674 if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
1684 phydev->autoneg = AUTONEG_DISABLE;
1697 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1698 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1705 struct device_node *node = phydev->mdio.dev.of_node;
1706 struct nxp_c45_phy *priv = phydev->priv;
1711 if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1712 priv->flags |= TJA11XX_REVERSE_MODE;
1725 priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1727 return -ENOMEM;
1729 skb_queue_head_init(&priv->tx_queue);
1730 skb_queue_head_init(&priv->rx_queue);
1732 priv->phydev = phydev;
1734 phydev->priv = priv;
1738 mutex_init(&priv->ptp_lock);
1750 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1751 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1752 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1753 priv->mii_ts.ts_info = nxp_c45_ts_info;
1754 phydev->mii_ts = &priv->mii_ts;
1758 phydev->default_timestamp = true;
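/*
 * Editor's note: hanging priv->mii_ts off the phydev is what routes the
 * stack's rxtstamp/txtstamp/hwtstamp/ts_info calls to the handlers above;
 * default_timestamp marks this PHY's timestamps as the default for the
 * attached netdev.
 */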
1784 struct nxp_c45_phy *priv = phydev->priv;
1786 if (priv->ptp_clock)
1787 ptp_clock_unregister(priv->ptp_clock);
1789 skb_queue_purge(&priv->tx_queue);
1790 skb_queue_purge(&priv->rx_queue);
1971 if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
1972 phydrv->phy_id_mask))
1981 if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
1982 phydrv->phy_id_mask))
2171 MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");